1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a referenced and locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a referenced and locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
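/* A note on the dynamic PSM search in l2cap_add_psm() above: per the
 * Bluetooth core spec, BR/EDR PSMs must keep their least significant
 * bit set, so the dynamic range starting at L2CAP_PSM_DYN_START is
 * walked in steps of 2, while the LE dynamic range
 * (L2CAP_PSM_LE_DYN_START..L2CAP_PSM_LE_DYN_END) is contiguous and is
 * walked in steps of 1. A hypothetical caller wanting an auto-assigned
 * PSM simply passes 0:
 *
 *	err = l2cap_add_psm(chan, &chan->src, 0);
 *	if (!err)
 *		BT_DBG("assigned PSM 0x%4.4x", __le16_to_cpu(chan->psm));
 *
 * This sketch is illustrative only; it is not a call site taken from
 * this file.
 */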
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
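/* A minimal usage sketch of the seq_list helpers above, assuming a
 * transmit window of 8 purely for illustration:
 *
 *	struct l2cap_seq_list retrans;
 *
 *	if (l2cap_seq_list_init(&retrans, 8))
 *		return -ENOMEM;
 *	l2cap_seq_list_append(&retrans, 3);	 (head = tail = 3)
 *	l2cap_seq_list_append(&retrans, 5);	 (list is now 3 -> 5)
 *	l2cap_seq_list_contains(&retrans, 5);	 (true, O(1) array lookup)
 *	l2cap_seq_list_pop(&retrans);		 (returns 3, head becomes 5)
 *	l2cap_seq_list_free(&retrans);
 *
 * Duplicate appends are ignored, and popping the last element resets
 * head and tail to L2CAP_SEQ_LIST_CLEAR, as implemented above.
 */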
428 
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	mutex_lock(&conn->chan_lock);
439 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 	 * this work. No need to call l2cap_chan_hold(chan) here again.
441 	 */
442 	l2cap_chan_lock(chan);
443 
444 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 		reason = ECONNREFUSED;
446 	else if (chan->state == BT_CONNECT &&
447 		 chan->sec_level != BT_SECURITY_SDP)
448 		reason = ECONNREFUSED;
449 	else
450 		reason = ETIMEDOUT;
451 
452 	l2cap_chan_close(chan, reason);
453 
454 	chan->ops->close(chan);
455 
456 	l2cap_chan_unlock(chan);
457 	l2cap_chan_put(chan);
458 
459 	mutex_unlock(&conn->chan_lock);
460 }
461 
462 struct l2cap_chan *l2cap_chan_create(void)
463 {
464 	struct l2cap_chan *chan;
465 
466 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 	if (!chan)
468 		return NULL;
469 
470 	skb_queue_head_init(&chan->tx_q);
471 	skb_queue_head_init(&chan->srej_q);
472 	mutex_init(&chan->lock);
473 
474 	/* Set default lock nesting level */
475 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
476 
477 	write_lock(&chan_list_lock);
478 	list_add(&chan->global_l, &chan_list);
479 	write_unlock(&chan_list_lock);
480 
481 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
485 
486 	chan->state = BT_OPEN;
487 
488 	kref_init(&chan->kref);
489 
490 	/* This flag is cleared in l2cap_chan_ready() */
491 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
492 
493 	BT_DBG("chan %p", chan);
494 
495 	return chan;
496 }
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
559 {
560 	chan->sdu = NULL;
561 	chan->sdu_last_frag = NULL;
562 	chan->sdu_len = 0;
563 	chan->tx_credits = tx_credits;
564 	/* Derive MPS from connection MTU to stop HCI fragmentation */
565 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 	/* Give enough credits for a full packet */
567 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
568 
569 	skb_queue_head_init(&chan->tx_q);
570 }
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
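/* Worked example of the derivation above (the numbers are illustrative,
 * not taken from this file): with chan->imtu = 512 and conn->mtu = 251,
 * l2cap_le_flowctl_init() computes
 *
 *	mps        = min(512, 251 - L2CAP_HDR_SIZE) = 247
 *	rx_credits = 512 / 247 + 1                  = 3
 *
 * i.e. enough credits for the peer to transfer one full 512-byte SDU in
 * 247-byte PDUs without stalling. l2cap_ecred_init() then only bumps
 * mps up to L2CAP_ECRED_MIN_MPS (and recomputes rx_credits) when the
 * derived value would fall below the 64-octet minimum required of
 * enhanced-credit channels.
 */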
582 
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 	       __le16_to_cpu(chan->psm), chan->dcid);
587 
588 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
589 
590 	chan->conn = conn;
591 
592 	switch (chan->chan_type) {
593 	case L2CAP_CHAN_CONN_ORIENTED:
594 		/* Alloc CID for connection-oriented socket */
595 		chan->scid = l2cap_alloc_cid(conn);
596 		if (conn->hcon->type == ACL_LINK)
597 			chan->omtu = L2CAP_DEFAULT_MTU;
598 		break;
599 
600 	case L2CAP_CHAN_CONN_LESS:
601 		/* Connectionless socket */
602 		chan->scid = L2CAP_CID_CONN_LESS;
603 		chan->dcid = L2CAP_CID_CONN_LESS;
604 		chan->omtu = L2CAP_DEFAULT_MTU;
605 		break;
606 
607 	case L2CAP_CHAN_FIXED:
608 		/* Caller will set CID and CID specific MTU values */
609 		break;
610 
611 	default:
612 		/* Raw socket can send/recv signalling messages only */
613 		chan->scid = L2CAP_CID_SIGNALING;
614 		chan->dcid = L2CAP_CID_SIGNALING;
615 		chan->omtu = L2CAP_DEFAULT_MTU;
616 	}
617 
618 	chan->local_id		= L2CAP_BESTEFFORT_ID;
619 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
620 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
621 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
622 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
623 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
624 
625 	l2cap_chan_hold(chan);
626 
627 	/* Only keep a reference for fixed channels if they requested it */
628 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 		hci_conn_hold(conn->hcon);
631 
632 	list_add(&chan->list, &conn->chan_l);
633 }
634 
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
636 {
637 	mutex_lock(&conn->chan_lock);
638 	__l2cap_chan_add(conn, chan);
639 	mutex_unlock(&conn->chan_lock);
640 }
641 
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch (chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 
709 	return;
710 }
711 EXPORT_SYMBOL_GPL(l2cap_chan_del);
712 
713 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
714 			      void *data)
715 {
716 	struct l2cap_chan *chan;
717 
718 	list_for_each_entry(chan, &conn->chan_l, list) {
719 		func(chan, data);
720 	}
721 }
722 
723 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
724 		     void *data)
725 {
726 	if (!conn)
727 		return;
728 
729 	mutex_lock(&conn->chan_lock);
730 	__l2cap_chan_list(conn, func, data);
731 	mutex_unlock(&conn->chan_lock);
732 }
733 
734 EXPORT_SYMBOL_GPL(l2cap_chan_list);
735 
736 static void l2cap_conn_update_id_addr(struct work_struct *work)
737 {
738 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
739 					       id_addr_update_work);
740 	struct hci_conn *hcon = conn->hcon;
741 	struct l2cap_chan *chan;
742 
743 	mutex_lock(&conn->chan_lock);
744 
745 	list_for_each_entry(chan, &conn->chan_l, list) {
746 		l2cap_chan_lock(chan);
747 		bacpy(&chan->dst, &hcon->dst);
748 		chan->dst_type = bdaddr_dst_type(hcon);
749 		l2cap_chan_unlock(chan);
750 	}
751 
752 	mutex_unlock(&conn->chan_lock);
753 }
754 
755 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
756 {
757 	struct l2cap_conn *conn = chan->conn;
758 	struct l2cap_le_conn_rsp rsp;
759 	u16 result;
760 
761 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
762 		result = L2CAP_CR_LE_AUTHORIZATION;
763 	else
764 		result = L2CAP_CR_LE_BAD_PSM;
765 
766 	l2cap_state_change(chan, BT_DISCONN);
767 
768 	rsp.dcid    = cpu_to_le16(chan->scid);
769 	rsp.mtu     = cpu_to_le16(chan->imtu);
770 	rsp.mps     = cpu_to_le16(chan->mps);
771 	rsp.credits = cpu_to_le16(chan->rx_credits);
772 	rsp.result  = cpu_to_le16(result);
773 
774 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
775 		       &rsp);
776 }
777 
778 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
779 {
780 	struct l2cap_conn *conn = chan->conn;
781 	struct l2cap_ecred_conn_rsp rsp;
782 	u16 result;
783 
784 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
785 		result = L2CAP_CR_LE_AUTHORIZATION;
786 	else
787 		result = L2CAP_CR_LE_BAD_PSM;
788 
789 	l2cap_state_change(chan, BT_DISCONN);
790 
791 	memset(&rsp, 0, sizeof(rsp));
792 
793 	rsp.result  = cpu_to_le16(result);
794 
795 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
796 		       &rsp);
797 }
798 
799 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
800 {
801 	struct l2cap_conn *conn = chan->conn;
802 	struct l2cap_conn_rsp rsp;
803 	u16 result;
804 
805 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
806 		result = L2CAP_CR_SEC_BLOCK;
807 	else
808 		result = L2CAP_CR_BAD_PSM;
809 
810 	l2cap_state_change(chan, BT_DISCONN);
811 
812 	rsp.scid   = cpu_to_le16(chan->dcid);
813 	rsp.dcid   = cpu_to_le16(chan->scid);
814 	rsp.result = cpu_to_le16(result);
815 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
816 
817 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
818 }
819 
820 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
821 {
822 	struct l2cap_conn *conn = chan->conn;
823 
824 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
825 
826 	switch (chan->state) {
827 	case BT_LISTEN:
828 		chan->ops->teardown(chan, 0);
829 		break;
830 
831 	case BT_CONNECTED:
832 	case BT_CONFIG:
833 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
835 			l2cap_send_disconn_req(chan, reason);
836 		} else
837 			l2cap_chan_del(chan, reason);
838 		break;
839 
840 	case BT_CONNECT2:
841 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
842 			if (conn->hcon->type == ACL_LINK)
843 				l2cap_chan_connect_reject(chan);
844 			else if (conn->hcon->type == LE_LINK) {
845 				switch (chan->mode) {
846 				case L2CAP_MODE_LE_FLOWCTL:
847 					l2cap_chan_le_connect_reject(chan);
848 					break;
849 				case L2CAP_MODE_EXT_FLOWCTL:
850 					l2cap_chan_ecred_connect_reject(chan);
851 					break;
852 				}
853 			}
854 		}
855 
856 		l2cap_chan_del(chan, reason);
857 		break;
858 
859 	case BT_CONNECT:
860 	case BT_DISCONN:
861 		l2cap_chan_del(chan, reason);
862 		break;
863 
864 	default:
865 		chan->ops->teardown(chan, 0);
866 		break;
867 	}
868 }
869 EXPORT_SYMBOL(l2cap_chan_close);
870 
871 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
872 {
873 	switch (chan->chan_type) {
874 	case L2CAP_CHAN_RAW:
875 		switch (chan->sec_level) {
876 		case BT_SECURITY_HIGH:
877 		case BT_SECURITY_FIPS:
878 			return HCI_AT_DEDICATED_BONDING_MITM;
879 		case BT_SECURITY_MEDIUM:
880 			return HCI_AT_DEDICATED_BONDING;
881 		default:
882 			return HCI_AT_NO_BONDING;
883 		}
884 		break;
885 	case L2CAP_CHAN_CONN_LESS:
886 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
887 			if (chan->sec_level == BT_SECURITY_LOW)
888 				chan->sec_level = BT_SECURITY_SDP;
889 		}
890 		if (chan->sec_level == BT_SECURITY_HIGH ||
891 		    chan->sec_level == BT_SECURITY_FIPS)
892 			return HCI_AT_NO_BONDING_MITM;
893 		else
894 			return HCI_AT_NO_BONDING;
895 		break;
896 	case L2CAP_CHAN_CONN_ORIENTED:
897 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
898 			if (chan->sec_level == BT_SECURITY_LOW)
899 				chan->sec_level = BT_SECURITY_SDP;
900 
901 			if (chan->sec_level == BT_SECURITY_HIGH ||
902 			    chan->sec_level == BT_SECURITY_FIPS)
903 				return HCI_AT_NO_BONDING_MITM;
904 			else
905 				return HCI_AT_NO_BONDING;
906 		}
907 		fallthrough;
908 
909 	default:
910 		switch (chan->sec_level) {
911 		case BT_SECURITY_HIGH:
912 		case BT_SECURITY_FIPS:
913 			return HCI_AT_GENERAL_BONDING_MITM;
914 		case BT_SECURITY_MEDIUM:
915 			return HCI_AT_GENERAL_BONDING;
916 		default:
917 			return HCI_AT_NO_BONDING;
918 		}
919 		break;
920 	}
921 }
922 
923 /* Service level security */
924 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
925 {
926 	struct l2cap_conn *conn = chan->conn;
927 	__u8 auth_type;
928 
929 	if (conn->hcon->type == LE_LINK)
930 		return smp_conn_security(conn->hcon, chan->sec_level);
931 
932 	auth_type = l2cap_get_auth_type(chan);
933 
934 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
935 				 initiator);
936 }
937 
938 static u8 l2cap_get_ident(struct l2cap_conn *conn)
939 {
940 	u8 id;
941 
942 	/* Get next available identifier.
943 	 *    1 - 128 are used by kernel.
944 	 *  129 - 199 are reserved.
945 	 *  200 - 254 are used by utilities like l2ping, etc.
946 	 */
947 
948 	mutex_lock(&conn->ident_lock);
949 
950 	if (++conn->tx_ident > 128)
951 		conn->tx_ident = 1;
952 
953 	id = conn->tx_ident;
954 
955 	mutex_unlock(&conn->ident_lock);
956 
957 	return id;
958 }
959 
960 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
961 			   void *data)
962 {
963 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
964 	u8 flags;
965 
966 	BT_DBG("code 0x%2.2x", code);
967 
968 	if (!skb)
969 		return;
970 
971 	/* Use NO_FLUSH if supported or we have an LE link (which does
972 	 * not support auto-flushing packets) */
973 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
974 	    conn->hcon->type == LE_LINK)
975 		flags = ACL_START_NO_FLUSH;
976 	else
977 		flags = ACL_START;
978 
979 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
980 	skb->priority = HCI_PRIO_MAX;
981 
982 	hci_send_acl(conn->hchan, skb, flags);
983 }
984 
985 static bool __chan_is_moving(struct l2cap_chan *chan)
986 {
987 	return chan->move_state != L2CAP_MOVE_STABLE &&
988 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
989 }
990 
991 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
992 {
993 	struct hci_conn *hcon = chan->conn->hcon;
994 	u16 flags;
995 
996 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
997 	       skb->priority);
998 
999 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
1000 		if (chan->hs_hchan)
1001 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1002 		else
1003 			kfree_skb(skb);
1004 
1005 		return;
1006 	}
1007 
1008 	/* Use NO_FLUSH for LE links (where this is the only option) or
1009 	 * if the BR/EDR link supports it and flushing has not been
1010 	 * explicitly requested (through FLAG_FLUSHABLE).
1011 	 */
1012 	if (hcon->type == LE_LINK ||
1013 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1014 	     lmp_no_flush_capable(hcon->hdev)))
1015 		flags = ACL_START_NO_FLUSH;
1016 	else
1017 		flags = ACL_START;
1018 
1019 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1020 	hci_send_acl(chan->conn->hchan, skb, flags);
1021 }
1022 
1023 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1024 {
1025 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1026 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1027 
1028 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1029 		/* S-Frame */
1030 		control->sframe = 1;
1031 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1032 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1033 
1034 		control->sar = 0;
1035 		control->txseq = 0;
1036 	} else {
1037 		/* I-Frame */
1038 		control->sframe = 0;
1039 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1040 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1041 
1042 		control->poll = 0;
1043 		control->super = 0;
1044 	}
1045 }
1046 
1047 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1048 {
1049 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1050 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1051 
1052 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1053 		/* S-Frame */
1054 		control->sframe = 1;
1055 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1056 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1057 
1058 		control->sar = 0;
1059 		control->txseq = 0;
1060 	} else {
1061 		/* I-Frame */
1062 		control->sframe = 0;
1063 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1064 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1065 
1066 		control->poll = 0;
1067 		control->super = 0;
1068 	}
1069 }
1070 
1071 static inline void __unpack_control(struct l2cap_chan *chan,
1072 				    struct sk_buff *skb)
1073 {
1074 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1075 		__unpack_extended_control(get_unaligned_le32(skb->data),
1076 					  &bt_cb(skb)->l2cap);
1077 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1078 	} else {
1079 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1080 					  &bt_cb(skb)->l2cap);
1081 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1082 	}
1083 }
1084 
1085 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1086 {
1087 	u32 packed;
1088 
1089 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1090 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1091 
1092 	if (control->sframe) {
1093 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1094 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1095 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1096 	} else {
1097 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1098 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1099 	}
1100 
1101 	return packed;
1102 }
1103 
1104 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1105 {
1106 	u16 packed;
1107 
1108 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1109 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1110 
1111 	if (control->sframe) {
1112 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1113 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1114 		packed |= L2CAP_CTRL_FRAME_TYPE;
1115 	} else {
1116 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1117 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1118 	}
1119 
1120 	return packed;
1121 }
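/* Worked example for the control field helpers above (the values are
 * illustrative; the field positions come from the L2CAP_CTRL_* and
 * L2CAP_EXT_CTRL_* shift/mask definitions): building an RR S-frame that
 * acknowledges up to reqseq 5 with the poll bit set would look roughly
 * like
 *
 *	struct l2cap_ctrl control = {
 *		.sframe = 1,
 *		.super  = L2CAP_SUPER_RR,
 *		.poll   = 1,
 *		.reqseq = 5,
 *	};
 *	u16 enh = __pack_enhanced_control(&control);
 *
 * and feeding the result back through __unpack_enhanced_control()
 * recovers the same fields, since pack and unpack use identical shifts
 * and masks. The 32-bit extended variants mirror this for channels with
 * FLAG_EXT_CTRL set.
 */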
1122 
1123 static inline void __pack_control(struct l2cap_chan *chan,
1124 				  struct l2cap_ctrl *control,
1125 				  struct sk_buff *skb)
1126 {
1127 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1128 		put_unaligned_le32(__pack_extended_control(control),
1129 				   skb->data + L2CAP_HDR_SIZE);
1130 	} else {
1131 		put_unaligned_le16(__pack_enhanced_control(control),
1132 				   skb->data + L2CAP_HDR_SIZE);
1133 	}
1134 }
1135 
1136 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1137 {
1138 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1139 		return L2CAP_EXT_HDR_SIZE;
1140 	else
1141 		return L2CAP_ENH_HDR_SIZE;
1142 }
1143 
1144 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1145 					       u32 control)
1146 {
1147 	struct sk_buff *skb;
1148 	struct l2cap_hdr *lh;
1149 	int hlen = __ertm_hdr_size(chan);
1150 
1151 	if (chan->fcs == L2CAP_FCS_CRC16)
1152 		hlen += L2CAP_FCS_SIZE;
1153 
1154 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1155 
1156 	if (!skb)
1157 		return ERR_PTR(-ENOMEM);
1158 
1159 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1160 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1161 	lh->cid = cpu_to_le16(chan->dcid);
1162 
1163 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1164 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1165 	else
1166 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1167 
1168 	if (chan->fcs == L2CAP_FCS_CRC16) {
1169 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1170 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1171 	}
1172 
1173 	skb->priority = HCI_PRIO_MAX;
1174 	return skb;
1175 }
1176 
1177 static void l2cap_send_sframe(struct l2cap_chan *chan,
1178 			      struct l2cap_ctrl *control)
1179 {
1180 	struct sk_buff *skb;
1181 	u32 control_field;
1182 
1183 	BT_DBG("chan %p, control %p", chan, control);
1184 
1185 	if (!control->sframe)
1186 		return;
1187 
1188 	if (__chan_is_moving(chan))
1189 		return;
1190 
1191 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1192 	    !control->poll)
1193 		control->final = 1;
1194 
1195 	if (control->super == L2CAP_SUPER_RR)
1196 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1197 	else if (control->super == L2CAP_SUPER_RNR)
1198 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1199 
1200 	if (control->super != L2CAP_SUPER_SREJ) {
1201 		chan->last_acked_seq = control->reqseq;
1202 		__clear_ack_timer(chan);
1203 	}
1204 
1205 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1206 	       control->final, control->poll, control->super);
1207 
1208 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1209 		control_field = __pack_extended_control(control);
1210 	else
1211 		control_field = __pack_enhanced_control(control);
1212 
1213 	skb = l2cap_create_sframe_pdu(chan, control_field);
1214 	if (!IS_ERR(skb))
1215 		l2cap_do_send(chan, skb);
1216 }
1217 
1218 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1219 {
1220 	struct l2cap_ctrl control;
1221 
1222 	BT_DBG("chan %p, poll %d", chan, poll);
1223 
1224 	memset(&control, 0, sizeof(control));
1225 	control.sframe = 1;
1226 	control.poll = poll;
1227 
1228 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1229 		control.super = L2CAP_SUPER_RNR;
1230 	else
1231 		control.super = L2CAP_SUPER_RR;
1232 
1233 	control.reqseq = chan->buffer_seq;
1234 	l2cap_send_sframe(chan, &control);
1235 }
1236 
1237 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1238 {
1239 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1240 		return true;
1241 
1242 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1243 }
1244 
1245 static bool __amp_capable(struct l2cap_chan *chan)
1246 {
1247 	struct l2cap_conn *conn = chan->conn;
1248 	struct hci_dev *hdev;
1249 	bool amp_available = false;
1250 
1251 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1252 		return false;
1253 
1254 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1255 		return false;
1256 
1257 	read_lock(&hci_dev_list_lock);
1258 	list_for_each_entry(hdev, &hci_dev_list, list) {
1259 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1260 		    test_bit(HCI_UP, &hdev->flags)) {
1261 			amp_available = true;
1262 			break;
1263 		}
1264 	}
1265 	read_unlock(&hci_dev_list_lock);
1266 
1267 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1268 		return amp_available;
1269 
1270 	return false;
1271 }
1272 
1273 static bool l2cap_check_efs(struct l2cap_chan *chan)
1274 {
1275 	/* Check EFS parameters */
1276 	return true;
1277 }
1278 
1279 void l2cap_send_conn_req(struct l2cap_chan *chan)
1280 {
1281 	struct l2cap_conn *conn = chan->conn;
1282 	struct l2cap_conn_req req;
1283 
1284 	req.scid = cpu_to_le16(chan->scid);
1285 	req.psm  = chan->psm;
1286 
1287 	chan->ident = l2cap_get_ident(conn);
1288 
1289 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1290 
1291 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1292 }
1293 
1294 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1295 {
1296 	struct l2cap_create_chan_req req;
1297 	req.scid = cpu_to_le16(chan->scid);
1298 	req.psm  = chan->psm;
1299 	req.amp_id = amp_id;
1300 
1301 	chan->ident = l2cap_get_ident(chan->conn);
1302 
1303 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1304 		       sizeof(req), &req);
1305 }
1306 
1307 static void l2cap_move_setup(struct l2cap_chan *chan)
1308 {
1309 	struct sk_buff *skb;
1310 
1311 	BT_DBG("chan %p", chan);
1312 
1313 	if (chan->mode != L2CAP_MODE_ERTM)
1314 		return;
1315 
1316 	__clear_retrans_timer(chan);
1317 	__clear_monitor_timer(chan);
1318 	__clear_ack_timer(chan);
1319 
1320 	chan->retry_count = 0;
1321 	skb_queue_walk(&chan->tx_q, skb) {
1322 		if (bt_cb(skb)->l2cap.retries)
1323 			bt_cb(skb)->l2cap.retries = 1;
1324 		else
1325 			break;
1326 	}
1327 
1328 	chan->expected_tx_seq = chan->buffer_seq;
1329 
1330 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1331 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1332 	l2cap_seq_list_clear(&chan->retrans_list);
1333 	l2cap_seq_list_clear(&chan->srej_list);
1334 	skb_queue_purge(&chan->srej_q);
1335 
1336 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1337 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1338 
1339 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1340 }
1341 
1342 static void l2cap_move_done(struct l2cap_chan *chan)
1343 {
1344 	u8 move_role = chan->move_role;
1345 	BT_DBG("chan %p", chan);
1346 
1347 	chan->move_state = L2CAP_MOVE_STABLE;
1348 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1349 
1350 	if (chan->mode != L2CAP_MODE_ERTM)
1351 		return;
1352 
1353 	switch (move_role) {
1354 	case L2CAP_MOVE_ROLE_INITIATOR:
1355 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1356 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1357 		break;
1358 	case L2CAP_MOVE_ROLE_RESPONDER:
1359 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1360 		break;
1361 	}
1362 }
1363 
1364 static void l2cap_chan_ready(struct l2cap_chan *chan)
1365 {
1366 	/* The channel may have already been flagged as connected in
1367 	 * case of receiving data before the L2CAP info req/rsp
1368 	 * procedure is complete.
1369 	 */
1370 	if (chan->state == BT_CONNECTED)
1371 		return;
1372 
1373 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1374 	chan->conf_state = 0;
1375 	__clear_chan_timer(chan);
1376 
1377 	switch (chan->mode) {
1378 	case L2CAP_MODE_LE_FLOWCTL:
1379 	case L2CAP_MODE_EXT_FLOWCTL:
1380 		if (!chan->tx_credits)
1381 			chan->ops->suspend(chan);
1382 		break;
1383 	}
1384 
1385 	chan->state = BT_CONNECTED;
1386 
1387 	chan->ops->ready(chan);
1388 }
1389 
1390 static void l2cap_le_connect(struct l2cap_chan *chan)
1391 {
1392 	struct l2cap_conn *conn = chan->conn;
1393 	struct l2cap_le_conn_req req;
1394 
1395 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1396 		return;
1397 
1398 	if (!chan->imtu)
1399 		chan->imtu = chan->conn->mtu;
1400 
1401 	l2cap_le_flowctl_init(chan, 0);
1402 
1403 	req.psm     = chan->psm;
1404 	req.scid    = cpu_to_le16(chan->scid);
1405 	req.mtu     = cpu_to_le16(chan->imtu);
1406 	req.mps     = cpu_to_le16(chan->mps);
1407 	req.credits = cpu_to_le16(chan->rx_credits);
1408 
1409 	chan->ident = l2cap_get_ident(conn);
1410 
1411 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1412 		       sizeof(req), &req);
1413 }
1414 
1415 struct l2cap_ecred_conn_data {
1416 	struct {
1417 		struct l2cap_ecred_conn_req req;
1418 		__le16 scid[5];
1419 	} __packed pdu;
1420 	struct l2cap_chan *chan;
1421 	struct pid *pid;
1422 	int count;
1423 };
1424 
1425 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1426 {
1427 	struct l2cap_ecred_conn_data *conn = data;
1428 	struct pid *pid;
1429 
1430 	if (chan == conn->chan)
1431 		return;
1432 
1433 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1434 		return;
1435 
1436 	pid = chan->ops->get_peer_pid(chan);
1437 
1438 	/* Only add deferred channels with the same PID/PSM */
1439 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1440 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1441 		return;
1442 
1443 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1444 		return;
1445 
1446 	l2cap_ecred_init(chan, 0);
1447 
1448 	/* Set the same ident so we can match on the rsp */
1449 	chan->ident = conn->chan->ident;
1450 
1451 	/* Include all channels deferred */
1452 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1453 
1454 	conn->count++;
1455 }
1456 
1457 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1458 {
1459 	struct l2cap_conn *conn = chan->conn;
1460 	struct l2cap_ecred_conn_data data;
1461 
1462 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1463 		return;
1464 
1465 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1466 		return;
1467 
1468 	l2cap_ecred_init(chan, 0);
1469 
1470 	memset(&data, 0, sizeof(data));
1471 	data.pdu.req.psm     = chan->psm;
1472 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1473 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1474 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1475 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1476 
1477 	chan->ident = l2cap_get_ident(conn);
1478 	data.pid = chan->ops->get_peer_pid(chan);
1479 
1480 	data.count = 1;
1481 	data.chan = chan;
1482 	data.pid = chan->ops->get_peer_pid(chan);
1483 
1484 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1485 
1486 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1487 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1488 		       &data.pdu);
1489 }
1490 
1491 static void l2cap_le_start(struct l2cap_chan *chan)
1492 {
1493 	struct l2cap_conn *conn = chan->conn;
1494 
1495 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1496 		return;
1497 
1498 	if (!chan->psm) {
1499 		l2cap_chan_ready(chan);
1500 		return;
1501 	}
1502 
1503 	if (chan->state == BT_CONNECT) {
1504 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1505 			l2cap_ecred_connect(chan);
1506 		else
1507 			l2cap_le_connect(chan);
1508 	}
1509 }
1510 
1511 static void l2cap_start_connection(struct l2cap_chan *chan)
1512 {
1513 	if (__amp_capable(chan)) {
1514 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1515 		a2mp_discover_amp(chan);
1516 	} else if (chan->conn->hcon->type == LE_LINK) {
1517 		l2cap_le_start(chan);
1518 	} else {
1519 		l2cap_send_conn_req(chan);
1520 	}
1521 }
1522 
1523 static void l2cap_request_info(struct l2cap_conn *conn)
1524 {
1525 	struct l2cap_info_req req;
1526 
1527 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1528 		return;
1529 
1530 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1531 
1532 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1533 	conn->info_ident = l2cap_get_ident(conn);
1534 
1535 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1536 
1537 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1538 		       sizeof(req), &req);
1539 }
1540 
1541 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1542 {
1543 	/* The minimum encryption key size needs to be enforced by the
1544 	 * host stack before establishing any L2CAP connections. The
1545 	 * specification in theory allows a minimum of 1, but to align
1546 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1547 	 *
1548 	 * This check might also be called for unencrypted connections
1549 	 * that have no key size requirements. Ensure that the link is
1550 	 * actually encrypted before enforcing a key size.
1551 	 */
1552 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1553 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1554 }
1555 
1556 static void l2cap_do_start(struct l2cap_chan *chan)
1557 {
1558 	struct l2cap_conn *conn = chan->conn;
1559 
1560 	if (conn->hcon->type == LE_LINK) {
1561 		l2cap_le_start(chan);
1562 		return;
1563 	}
1564 
1565 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1566 		l2cap_request_info(conn);
1567 		return;
1568 	}
1569 
1570 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1571 		return;
1572 
1573 	if (!l2cap_chan_check_security(chan, true) ||
1574 	    !__l2cap_no_conn_pending(chan))
1575 		return;
1576 
1577 	if (l2cap_check_enc_key_size(conn->hcon))
1578 		l2cap_start_connection(chan);
1579 	else
1580 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1581 }
1582 
1583 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1584 {
1585 	u32 local_feat_mask = l2cap_feat_mask;
1586 	if (!disable_ertm)
1587 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1588 
1589 	switch (mode) {
1590 	case L2CAP_MODE_ERTM:
1591 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1592 	case L2CAP_MODE_STREAMING:
1593 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1594 	default:
1595 		return 0x00;
1596 	}
1597 }
1598 
1599 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1600 {
1601 	struct l2cap_conn *conn = chan->conn;
1602 	struct l2cap_disconn_req req;
1603 
1604 	if (!conn)
1605 		return;
1606 
1607 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1608 		__clear_retrans_timer(chan);
1609 		__clear_monitor_timer(chan);
1610 		__clear_ack_timer(chan);
1611 	}
1612 
1613 	if (chan->scid == L2CAP_CID_A2MP) {
1614 		l2cap_state_change(chan, BT_DISCONN);
1615 		return;
1616 	}
1617 
1618 	req.dcid = cpu_to_le16(chan->dcid);
1619 	req.scid = cpu_to_le16(chan->scid);
1620 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1621 		       sizeof(req), &req);
1622 
1623 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1624 }
1625 
1626 /* ---- L2CAP connections ---- */
1627 static void l2cap_conn_start(struct l2cap_conn *conn)
1628 {
1629 	struct l2cap_chan *chan, *tmp;
1630 
1631 	BT_DBG("conn %p", conn);
1632 
1633 	mutex_lock(&conn->chan_lock);
1634 
1635 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1636 		l2cap_chan_lock(chan);
1637 
1638 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1639 			l2cap_chan_ready(chan);
1640 			l2cap_chan_unlock(chan);
1641 			continue;
1642 		}
1643 
1644 		if (chan->state == BT_CONNECT) {
1645 			if (!l2cap_chan_check_security(chan, true) ||
1646 			    !__l2cap_no_conn_pending(chan)) {
1647 				l2cap_chan_unlock(chan);
1648 				continue;
1649 			}
1650 
1651 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1652 			    && test_bit(CONF_STATE2_DEVICE,
1653 					&chan->conf_state)) {
1654 				l2cap_chan_close(chan, ECONNRESET);
1655 				l2cap_chan_unlock(chan);
1656 				continue;
1657 			}
1658 
1659 			if (l2cap_check_enc_key_size(conn->hcon))
1660 				l2cap_start_connection(chan);
1661 			else
1662 				l2cap_chan_close(chan, ECONNREFUSED);
1663 
1664 		} else if (chan->state == BT_CONNECT2) {
1665 			struct l2cap_conn_rsp rsp;
1666 			char buf[128];
1667 			rsp.scid = cpu_to_le16(chan->dcid);
1668 			rsp.dcid = cpu_to_le16(chan->scid);
1669 
1670 			if (l2cap_chan_check_security(chan, false)) {
1671 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1672 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1673 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1674 					chan->ops->defer(chan);
1675 
1676 				} else {
1677 					l2cap_state_change(chan, BT_CONFIG);
1678 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1679 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1680 				}
1681 			} else {
1682 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1683 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1684 			}
1685 
1686 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1687 				       sizeof(rsp), &rsp);
1688 
1689 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1690 			    rsp.result != L2CAP_CR_SUCCESS) {
1691 				l2cap_chan_unlock(chan);
1692 				continue;
1693 			}
1694 
1695 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1696 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1697 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1698 			chan->num_conf_req++;
1699 		}
1700 
1701 		l2cap_chan_unlock(chan);
1702 	}
1703 
1704 	mutex_unlock(&conn->chan_lock);
1705 }
1706 
1707 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1708 {
1709 	struct hci_conn *hcon = conn->hcon;
1710 	struct hci_dev *hdev = hcon->hdev;
1711 
1712 	BT_DBG("%s conn %p", hdev->name, conn);
1713 
1714 	/* For outgoing pairing which doesn't necessarily have an
1715 	 * associated socket (e.g. mgmt_pair_device).
1716 	 */
1717 	if (hcon->out)
1718 		smp_conn_security(hcon, hcon->pending_sec_level);
1719 
1720 	/* For LE peripheral connections, make sure the connection interval
1721 	 * is in the range of the minimum and maximum interval that has
1722 	 * been configured for this connection. If not, then trigger
1723 	 * the connection update procedure.
1724 	 */
1725 	if (hcon->role == HCI_ROLE_SLAVE &&
1726 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1727 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1728 		struct l2cap_conn_param_update_req req;
1729 
1730 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1731 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1732 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1733 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1734 
1735 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1736 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1737 	}
1738 }
1739 
1740 static void l2cap_conn_ready(struct l2cap_conn *conn)
1741 {
1742 	struct l2cap_chan *chan;
1743 	struct hci_conn *hcon = conn->hcon;
1744 
1745 	BT_DBG("conn %p", conn);
1746 
1747 	if (hcon->type == ACL_LINK)
1748 		l2cap_request_info(conn);
1749 
1750 	mutex_lock(&conn->chan_lock);
1751 
1752 	list_for_each_entry(chan, &conn->chan_l, list) {
1753 
1754 		l2cap_chan_lock(chan);
1755 
1756 		if (chan->scid == L2CAP_CID_A2MP) {
1757 			l2cap_chan_unlock(chan);
1758 			continue;
1759 		}
1760 
1761 		if (hcon->type == LE_LINK) {
1762 			l2cap_le_start(chan);
1763 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1764 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1765 				l2cap_chan_ready(chan);
1766 		} else if (chan->state == BT_CONNECT) {
1767 			l2cap_do_start(chan);
1768 		}
1769 
1770 		l2cap_chan_unlock(chan);
1771 	}
1772 
1773 	mutex_unlock(&conn->chan_lock);
1774 
1775 	if (hcon->type == LE_LINK)
1776 		l2cap_le_conn_ready(conn);
1777 
1778 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1779 }
1780 
1781 /* Notify sockets that we cannot guarantee reliability anymore */
1782 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1783 {
1784 	struct l2cap_chan *chan;
1785 
1786 	BT_DBG("conn %p", conn);
1787 
1788 	mutex_lock(&conn->chan_lock);
1789 
1790 	list_for_each_entry(chan, &conn->chan_l, list) {
1791 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1792 			l2cap_chan_set_err(chan, err);
1793 	}
1794 
1795 	mutex_unlock(&conn->chan_lock);
1796 }
1797 
1798 static void l2cap_info_timeout(struct work_struct *work)
1799 {
1800 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1801 					       info_timer.work);
1802 
1803 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1804 	conn->info_ident = 0;
1805 
1806 	l2cap_conn_start(conn);
1807 }
1808 
1809 /*
1810  * l2cap_user
1811  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1812  * callback is called during registration. The ->remove callback is called
1813  * during unregistration.
1814  * An l2cap_user object can be unregistered either explicitly, or implicitly
1815  * when the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1816  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1817  * External modules must own a reference to the l2cap_conn object if they intend
1818  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1819  * any time if they don't.
1820  */
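/*
 * A minimal usage sketch (hypothetical caller; the names are illustrative
 * and not part of this file). The caller initialises user->list and keeps
 * its own reference on the connection while it stays registered:
 *
 *	INIT_LIST_HEAD(&user->list);
 *	user->probe  = my_probe;	// called during registration
 *	user->remove = my_remove;	// called during unregistration
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, user);
 *	...
 *	l2cap_unregister_user(conn, user);
 *	l2cap_conn_put(conn);
 */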
1821 
1822 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1823 {
1824 	struct hci_dev *hdev = conn->hcon->hdev;
1825 	int ret;
1826 
1827 	/* We need to check whether l2cap_conn is registered. If it is not, we
1828 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1829 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1830 	 * relies on the parent hci_conn object to be locked. This itself relies
1831 	 * on the hci_dev object to be locked. So we must lock the hci device
1832 	 * here, too. */
1833 
1834 	hci_dev_lock(hdev);
1835 
1836 	if (!list_empty(&user->list)) {
1837 		ret = -EINVAL;
1838 		goto out_unlock;
1839 	}
1840 
1841 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1842 	if (!conn->hchan) {
1843 		ret = -ENODEV;
1844 		goto out_unlock;
1845 	}
1846 
1847 	ret = user->probe(conn, user);
1848 	if (ret)
1849 		goto out_unlock;
1850 
1851 	list_add(&user->list, &conn->users);
1852 	ret = 0;
1853 
1854 out_unlock:
1855 	hci_dev_unlock(hdev);
1856 	return ret;
1857 }
1858 EXPORT_SYMBOL(l2cap_register_user);
1859 
1860 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1861 {
1862 	struct hci_dev *hdev = conn->hcon->hdev;
1863 
1864 	hci_dev_lock(hdev);
1865 
1866 	if (list_empty(&user->list))
1867 		goto out_unlock;
1868 
1869 	list_del_init(&user->list);
1870 	user->remove(conn, user);
1871 
1872 out_unlock:
1873 	hci_dev_unlock(hdev);
1874 }
1875 EXPORT_SYMBOL(l2cap_unregister_user);
1876 
1877 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1878 {
1879 	struct l2cap_user *user;
1880 
1881 	while (!list_empty(&conn->users)) {
1882 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1883 		list_del_init(&user->list);
1884 		user->remove(conn, user);
1885 	}
1886 }
1887 
1888 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1889 {
1890 	struct l2cap_conn *conn = hcon->l2cap_data;
1891 	struct l2cap_chan *chan, *l;
1892 
1893 	if (!conn)
1894 		return;
1895 
1896 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1897 
1898 	kfree_skb(conn->rx_skb);
1899 
1900 	skb_queue_purge(&conn->pending_rx);
1901 
1902 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1903 	 * might block if we are running on a worker from the same workqueue
1904 	 * pending_rx_work is waiting on.
1905 	 */
1906 	if (work_pending(&conn->pending_rx_work))
1907 		cancel_work_sync(&conn->pending_rx_work);
1908 
1909 	if (work_pending(&conn->id_addr_update_work))
1910 		cancel_work_sync(&conn->id_addr_update_work);
1911 
1912 	l2cap_unregister_all_users(conn);
1913 
1914 	/* Force the connection to be immediately dropped */
1915 	hcon->disc_timeout = 0;
1916 
1917 	mutex_lock(&conn->chan_lock);
1918 
1919 	/* Kill channels */
1920 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1921 		l2cap_chan_hold(chan);
1922 		l2cap_chan_lock(chan);
1923 
1924 		l2cap_chan_del(chan, err);
1925 
1926 		chan->ops->close(chan);
1927 
1928 		l2cap_chan_unlock(chan);
1929 		l2cap_chan_put(chan);
1930 	}
1931 
1932 	mutex_unlock(&conn->chan_lock);
1933 
1934 	hci_chan_del(conn->hchan);
1935 
1936 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1937 		cancel_delayed_work_sync(&conn->info_timer);
1938 
1939 	hcon->l2cap_data = NULL;
1940 	conn->hchan = NULL;
1941 	l2cap_conn_put(conn);
1942 }
1943 
1944 static void l2cap_conn_free(struct kref *ref)
1945 {
1946 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1947 
1948 	hci_conn_put(conn->hcon);
1949 	kfree(conn);
1950 }
1951 
1952 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1953 {
1954 	kref_get(&conn->ref);
1955 	return conn;
1956 }
1957 EXPORT_SYMBOL(l2cap_conn_get);
1958 
1959 void l2cap_conn_put(struct l2cap_conn *conn)
1960 {
1961 	kref_put(&conn->ref, l2cap_conn_free);
1962 }
1963 EXPORT_SYMBOL(l2cap_conn_put);
1964 
1965 /* ---- Socket interface ---- */
1966 
1967 /* Find socket with psm and source / destination bdaddr.
1968  * Returns closest match.
1969  */
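/* For example, a channel bound to (BDADDR_ANY, psm) acts as a wildcard
 * listener and is returned only when no channel bound to the exact
 * source/destination pair is found.
 */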
1970 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1971 						   bdaddr_t *src,
1972 						   bdaddr_t *dst,
1973 						   u8 link_type)
1974 {
1975 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1976 
1977 	read_lock(&chan_list_lock);
1978 
1979 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1980 		if (state && c->state != state)
1981 			continue;
1982 
1983 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1987 			continue;
1988 
1989 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1990 			int src_match, dst_match;
1991 			int src_any, dst_any;
1992 
1993 			/* Exact match. */
1994 			src_match = !bacmp(&c->src, src);
1995 			dst_match = !bacmp(&c->dst, dst);
1996 			if (src_match && dst_match) {
1997 				if (!l2cap_chan_hold_unless_zero(c))
1998 					continue;
1999 
2000 				read_unlock(&chan_list_lock);
2001 				return c;
2002 			}
2003 
2004 			/* Closest match */
2005 			src_any = !bacmp(&c->src, BDADDR_ANY);
2006 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2007 			if ((src_match && dst_any) || (src_any && dst_match) ||
2008 			    (src_any && dst_any))
2009 				c1 = c;
2010 		}
2011 	}
2012 
2013 	if (c1)
2014 		c1 = l2cap_chan_hold_unless_zero(c1);
2015 
2016 	read_unlock(&chan_list_lock);
2017 
2018 	return c1;
2019 }
2020 
2021 static void l2cap_monitor_timeout(struct work_struct *work)
2022 {
2023 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2024 					       monitor_timer.work);
2025 
2026 	BT_DBG("chan %p", chan);
2027 
2028 	l2cap_chan_lock(chan);
2029 
2030 	if (!chan->conn) {
2031 		l2cap_chan_unlock(chan);
2032 		l2cap_chan_put(chan);
2033 		return;
2034 	}
2035 
2036 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2037 
2038 	l2cap_chan_unlock(chan);
2039 	l2cap_chan_put(chan);
2040 }
2041 
2042 static void l2cap_retrans_timeout(struct work_struct *work)
2043 {
2044 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2045 					       retrans_timer.work);
2046 
2047 	BT_DBG("chan %p", chan);
2048 
2049 	l2cap_chan_lock(chan);
2050 
2051 	if (!chan->conn) {
2052 		l2cap_chan_unlock(chan);
2053 		l2cap_chan_put(chan);
2054 		return;
2055 	}
2056 
2057 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2058 	l2cap_chan_unlock(chan);
2059 	l2cap_chan_put(chan);
2060 }
2061 
2062 static void l2cap_streaming_send(struct l2cap_chan *chan,
2063 				 struct sk_buff_head *skbs)
2064 {
2065 	struct sk_buff *skb;
2066 	struct l2cap_ctrl *control;
2067 
2068 	BT_DBG("chan %p, skbs %p", chan, skbs);
2069 
2070 	if (__chan_is_moving(chan))
2071 		return;
2072 
2073 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2074 
2075 	while (!skb_queue_empty(&chan->tx_q)) {
2076 
2077 		skb = skb_dequeue(&chan->tx_q);
2078 
2079 		bt_cb(skb)->l2cap.retries = 1;
2080 		control = &bt_cb(skb)->l2cap;
2081 
2082 		control->reqseq = 0;
2083 		control->txseq = chan->next_tx_seq;
2084 
2085 		__pack_control(chan, control, skb);
2086 
2087 		if (chan->fcs == L2CAP_FCS_CRC16) {
2088 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2089 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2090 		}
2091 
2092 		l2cap_do_send(chan, skb);
2093 
2094 		BT_DBG("Sent txseq %u", control->txseq);
2095 
2096 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2097 		chan->frames_sent++;
2098 	}
2099 }
2100 
2101 static int l2cap_ertm_send(struct l2cap_chan *chan)
2102 {
2103 	struct sk_buff *skb, *tx_skb;
2104 	struct l2cap_ctrl *control;
2105 	int sent = 0;
2106 
2107 	BT_DBG("chan %p", chan);
2108 
2109 	if (chan->state != BT_CONNECTED)
2110 		return -ENOTCONN;
2111 
2112 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2113 		return 0;
2114 
2115 	if (__chan_is_moving(chan))
2116 		return 0;
2117 
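	/* Transmit new I-frames while data is queued, the peer's transmit
	 * window is not full and the channel is in the XMIT state. Each
	 * frame is cloned before sending so that the original stays on
	 * tx_q for a possible retransmission.
	 */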
2118 	while (chan->tx_send_head &&
2119 	       chan->unacked_frames < chan->remote_tx_win &&
2120 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2121 
2122 		skb = chan->tx_send_head;
2123 
2124 		bt_cb(skb)->l2cap.retries = 1;
2125 		control = &bt_cb(skb)->l2cap;
2126 
2127 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2128 			control->final = 1;
2129 
2130 		control->reqseq = chan->buffer_seq;
2131 		chan->last_acked_seq = chan->buffer_seq;
2132 		control->txseq = chan->next_tx_seq;
2133 
2134 		__pack_control(chan, control, skb);
2135 
2136 		if (chan->fcs == L2CAP_FCS_CRC16) {
2137 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2138 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2139 		}
2140 
2141 		/* Clone after data has been modified. Data is assumed to be
2142 		 * read-only (for locking purposes) on cloned sk_buffs.
2143 		 */
2144 		tx_skb = skb_clone(skb, GFP_KERNEL);
2145 
2146 		if (!tx_skb)
2147 			break;
2148 
2149 		__set_retrans_timer(chan);
2150 
2151 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2152 		chan->unacked_frames++;
2153 		chan->frames_sent++;
2154 		sent++;
2155 
2156 		if (skb_queue_is_last(&chan->tx_q, skb))
2157 			chan->tx_send_head = NULL;
2158 		else
2159 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2160 
2161 		l2cap_do_send(chan, tx_skb);
2162 		BT_DBG("Sent txseq %u", control->txseq);
2163 	}
2164 
2165 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2166 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2167 
2168 	return sent;
2169 }
2170 
2171 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2172 {
2173 	struct l2cap_ctrl control;
2174 	struct sk_buff *skb;
2175 	struct sk_buff *tx_skb;
2176 	u16 seq;
2177 
2178 	BT_DBG("chan %p", chan);
2179 
2180 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2181 		return;
2182 
2183 	if (__chan_is_moving(chan))
2184 		return;
2185 
2186 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2187 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2188 
2189 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2190 		if (!skb) {
2191 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2192 			       seq);
2193 			continue;
2194 		}
2195 
2196 		bt_cb(skb)->l2cap.retries++;
2197 		control = bt_cb(skb)->l2cap;
2198 
2199 		if (chan->max_tx != 0 &&
2200 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2201 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2202 			l2cap_send_disconn_req(chan, ECONNRESET);
2203 			l2cap_seq_list_clear(&chan->retrans_list);
2204 			break;
2205 		}
2206 
2207 		control.reqseq = chan->buffer_seq;
2208 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2209 			control.final = 1;
2210 		else
2211 			control.final = 0;
2212 
2213 		if (skb_cloned(skb)) {
2214 			/* Cloned sk_buffs are read-only, so we need a
2215 			 * writeable copy
2216 			 */
2217 			tx_skb = skb_copy(skb, GFP_KERNEL);
2218 		} else {
2219 			tx_skb = skb_clone(skb, GFP_KERNEL);
2220 		}
2221 
2222 		if (!tx_skb) {
2223 			l2cap_seq_list_clear(&chan->retrans_list);
2224 			break;
2225 		}
2226 
2227 		/* Update skb contents */
2228 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2229 			put_unaligned_le32(__pack_extended_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		} else {
2232 			put_unaligned_le16(__pack_enhanced_control(&control),
2233 					   tx_skb->data + L2CAP_HDR_SIZE);
2234 		}
2235 
2236 		/* Update FCS */
2237 		if (chan->fcs == L2CAP_FCS_CRC16) {
2238 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2239 					tx_skb->len - L2CAP_FCS_SIZE);
2240 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2241 						L2CAP_FCS_SIZE);
2242 		}
2243 
2244 		l2cap_do_send(chan, tx_skb);
2245 
2246 		BT_DBG("Resent txseq %d", control.txseq);
2247 
2248 		chan->last_acked_seq = chan->buffer_seq;
2249 	}
2250 }
2251 
2252 static void l2cap_retransmit(struct l2cap_chan *chan,
2253 			     struct l2cap_ctrl *control)
2254 {
2255 	BT_DBG("chan %p, control %p", chan, control);
2256 
2257 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2258 	l2cap_ertm_resend(chan);
2259 }
2260 
2261 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2262 				 struct l2cap_ctrl *control)
2263 {
2264 	struct sk_buff *skb;
2265 
2266 	BT_DBG("chan %p, control %p", chan, control);
2267 
2268 	if (control->poll)
2269 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2270 
2271 	l2cap_seq_list_clear(&chan->retrans_list);
2272 
2273 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2274 		return;
2275 
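	/* Rebuild the retransmit list: queue every unacked frame, starting
	 * at the peer's reqseq, up to but not including the first frame
	 * that has not been transmitted yet (tx_send_head).
	 */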
2276 	if (chan->unacked_frames) {
2277 		skb_queue_walk(&chan->tx_q, skb) {
2278 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2279 			    skb == chan->tx_send_head)
2280 				break;
2281 		}
2282 
2283 		skb_queue_walk_from(&chan->tx_q, skb) {
2284 			if (skb == chan->tx_send_head)
2285 				break;
2286 
2287 			l2cap_seq_list_append(&chan->retrans_list,
2288 					      bt_cb(skb)->l2cap.txseq);
2289 		}
2290 
2291 		l2cap_ertm_resend(chan);
2292 	}
2293 }
2294 
2295 static void l2cap_send_ack(struct l2cap_chan *chan)
2296 {
2297 	struct l2cap_ctrl control;
2298 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2299 					 chan->last_acked_seq);
2300 	int threshold;
2301 
2302 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2303 	       chan, chan->last_acked_seq, chan->buffer_seq);
2304 
2305 	memset(&control, 0, sizeof(control));
2306 	control.sframe = 1;
2307 
2308 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2309 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2310 		__clear_ack_timer(chan);
2311 		control.super = L2CAP_SUPER_RNR;
2312 		control.reqseq = chan->buffer_seq;
2313 		l2cap_send_sframe(chan, &control);
2314 	} else {
2315 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2316 			l2cap_ertm_send(chan);
2317 			/* If any i-frames were sent, they included an ack */
2318 			if (chan->buffer_seq == chan->last_acked_seq)
2319 				frames_to_ack = 0;
2320 		}
2321 
2322 		/* Ack now if the window is 3/4ths full.
2323 		 * Calculate without mul or div
2324 		 */
2325 		threshold = chan->ack_win;
2326 		threshold += threshold << 1;
2327 		threshold >>= 2;
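		/* Example: with ack_win = 63 this computes
		 * (63 + 126) >> 2 = 47, i.e. roughly 3/4 of the window.
		 */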
2328 
2329 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2330 		       threshold);
2331 
2332 		if (frames_to_ack >= threshold) {
2333 			__clear_ack_timer(chan);
2334 			control.super = L2CAP_SUPER_RR;
2335 			control.reqseq = chan->buffer_seq;
2336 			l2cap_send_sframe(chan, &control);
2337 			frames_to_ack = 0;
2338 		}
2339 
2340 		if (frames_to_ack)
2341 			__set_ack_timer(chan);
2342 	}
2343 }
2344 
2345 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2346 					 struct msghdr *msg, int len,
2347 					 int count, struct sk_buff *skb)
2348 {
2349 	struct l2cap_conn *conn = chan->conn;
2350 	struct sk_buff **frag;
2351 	int sent = 0;
2352 
2353 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2354 		return -EFAULT;
2355 
2356 	sent += count;
2357 	len  -= count;
2358 
2359 	/* Continuation fragments (no L2CAP header) */
2360 	frag = &skb_shinfo(skb)->frag_list;
2361 	while (len) {
2362 		struct sk_buff *tmp;
2363 
2364 		count = min_t(unsigned int, conn->mtu, len);
2365 
2366 		tmp = chan->ops->alloc_skb(chan, 0, count,
2367 					   msg->msg_flags & MSG_DONTWAIT);
2368 		if (IS_ERR(tmp))
2369 			return PTR_ERR(tmp);
2370 
2371 		*frag = tmp;
2372 
2373 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2374 				   &msg->msg_iter))
2375 			return -EFAULT;
2376 
2377 		sent += count;
2378 		len  -= count;
2379 
2380 		skb->len += (*frag)->len;
2381 		skb->data_len += (*frag)->len;
2382 
2383 		frag = &(*frag)->next;
2384 	}
2385 
2386 	return sent;
2387 }
2388 
2389 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2390 						 struct msghdr *msg, size_t len)
2391 {
2392 	struct l2cap_conn *conn = chan->conn;
2393 	struct sk_buff *skb;
2394 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2395 	struct l2cap_hdr *lh;
2396 
2397 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2398 	       __le16_to_cpu(chan->psm), len);
2399 
2400 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2401 
2402 	skb = chan->ops->alloc_skb(chan, hlen, count,
2403 				   msg->msg_flags & MSG_DONTWAIT);
2404 	if (IS_ERR(skb))
2405 		return skb;
2406 
2407 	/* Create L2CAP header */
2408 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 	lh->cid = cpu_to_le16(chan->dcid);
2410 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2411 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2422 					      struct msghdr *msg, size_t len)
2423 {
2424 	struct l2cap_conn *conn = chan->conn;
2425 	struct sk_buff *skb;
2426 	int err, count;
2427 	struct l2cap_hdr *lh;
2428 
2429 	BT_DBG("chan %p len %zu", chan, len);
2430 
2431 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2432 
2433 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2434 				   msg->msg_flags & MSG_DONTWAIT);
2435 	if (IS_ERR(skb))
2436 		return skb;
2437 
2438 	/* Create L2CAP header */
2439 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2440 	lh->cid = cpu_to_le16(chan->dcid);
2441 	lh->len = cpu_to_le16(len);
2442 
2443 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 	if (unlikely(err < 0)) {
2445 		kfree_skb(skb);
2446 		return ERR_PTR(err);
2447 	}
2448 	return skb;
2449 }
2450 
2451 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2452 					       struct msghdr *msg, size_t len,
2453 					       u16 sdulen)
2454 {
2455 	struct l2cap_conn *conn = chan->conn;
2456 	struct sk_buff *skb;
2457 	int err, count, hlen;
2458 	struct l2cap_hdr *lh;
2459 
2460 	BT_DBG("chan %p len %zu", chan, len);
2461 
2462 	if (!conn)
2463 		return ERR_PTR(-ENOTCONN);
2464 
2465 	hlen = __ertm_hdr_size(chan);
2466 
2467 	if (sdulen)
2468 		hlen += L2CAP_SDULEN_SIZE;
2469 
2470 	if (chan->fcs == L2CAP_FCS_CRC16)
2471 		hlen += L2CAP_FCS_SIZE;
2472 
2473 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2474 
2475 	skb = chan->ops->alloc_skb(chan, hlen, count,
2476 				   msg->msg_flags & MSG_DONTWAIT);
2477 	if (IS_ERR(skb))
2478 		return skb;
2479 
2480 	/* Create L2CAP header */
2481 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2482 	lh->cid = cpu_to_le16(chan->dcid);
2483 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2484 
2485 	/* Control header is populated later */
2486 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2487 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2488 	else
2489 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2490 
2491 	if (sdulen)
2492 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2493 
2494 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2495 	if (unlikely(err < 0)) {
2496 		kfree_skb(skb);
2497 		return ERR_PTR(err);
2498 	}
2499 
2500 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2501 	bt_cb(skb)->l2cap.retries = 0;
2502 	return skb;
2503 }
2504 
2505 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2506 			     struct sk_buff_head *seg_queue,
2507 			     struct msghdr *msg, size_t len)
2508 {
2509 	struct sk_buff *skb;
2510 	u16 sdu_len;
2511 	size_t pdu_len;
2512 	u8 sar;
2513 
2514 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2515 
2516 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2517 	 * so fragmented skbs are not used.  The HCI layer's handling
2518 	 * of fragmented skbs is not compatible with ERTM's queueing.
2519 	 */
2520 
2521 	/* PDU size is derived from the HCI MTU */
2522 	pdu_len = chan->conn->mtu;
2523 
2524 	/* Constrain PDU size for BR/EDR connections */
2525 	if (!chan->hs_hcon)
2526 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2527 
2528 	/* Adjust for largest possible L2CAP overhead. */
2529 	if (chan->fcs)
2530 		pdu_len -= L2CAP_FCS_SIZE;
2531 
2532 	pdu_len -= __ertm_hdr_size(chan);
2533 
2534 	/* Remote device may have requested smaller PDUs */
2535 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
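	/* Worked example (assuming a BR/EDR link, enhanced control field,
	 * CRC16 FCS and a large remote MPS): conn->mtu = 1021 is clamped to
	 * L2CAP_BREDR_MAX_PAYLOAD (1019), then reduced by the FCS (2) and
	 * the enhanced ERTM header (6), leaving up to 1011 data bytes per
	 * PDU before the remote MPS clamp above.
	 */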
2536 
2537 	if (len <= pdu_len) {
2538 		sar = L2CAP_SAR_UNSEGMENTED;
2539 		sdu_len = 0;
2540 		pdu_len = len;
2541 	} else {
2542 		sar = L2CAP_SAR_START;
2543 		sdu_len = len;
2544 	}
2545 
2546 	while (len > 0) {
2547 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2548 
2549 		if (IS_ERR(skb)) {
2550 			__skb_queue_purge(seg_queue);
2551 			return PTR_ERR(skb);
2552 		}
2553 
2554 		bt_cb(skb)->l2cap.sar = sar;
2555 		__skb_queue_tail(seg_queue, skb);
2556 
2557 		len -= pdu_len;
2558 		if (sdu_len)
2559 			sdu_len = 0;
2560 
2561 		if (len <= pdu_len) {
2562 			sar = L2CAP_SAR_END;
2563 			pdu_len = len;
2564 		} else {
2565 			sar = L2CAP_SAR_CONTINUE;
2566 		}
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2573 						   struct msghdr *msg,
2574 						   size_t len, u16 sdulen)
2575 {
2576 	struct l2cap_conn *conn = chan->conn;
2577 	struct sk_buff *skb;
2578 	int err, count, hlen;
2579 	struct l2cap_hdr *lh;
2580 
2581 	BT_DBG("chan %p len %zu", chan, len);
2582 
2583 	if (!conn)
2584 		return ERR_PTR(-ENOTCONN);
2585 
2586 	hlen = L2CAP_HDR_SIZE;
2587 
2588 	if (sdulen)
2589 		hlen += L2CAP_SDULEN_SIZE;
2590 
2591 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2592 
2593 	skb = chan->ops->alloc_skb(chan, hlen, count,
2594 				   msg->msg_flags & MSG_DONTWAIT);
2595 	if (IS_ERR(skb))
2596 		return skb;
2597 
2598 	/* Create L2CAP header */
2599 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2600 	lh->cid = cpu_to_le16(chan->dcid);
2601 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2602 
2603 	if (sdulen)
2604 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2605 
2606 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2607 	if (unlikely(err < 0)) {
2608 		kfree_skb(skb);
2609 		return ERR_PTR(err);
2610 	}
2611 
2612 	return skb;
2613 }
2614 
2615 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2616 				struct sk_buff_head *seg_queue,
2617 				struct msghdr *msg, size_t len)
2618 {
2619 	struct sk_buff *skb;
2620 	size_t pdu_len;
2621 	u16 sdu_len;
2622 
2623 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2624 
2625 	sdu_len = len;
2626 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
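	/* Example (hypothetical values): remote_mps = 65 and len = 200; the
	 * first PDU carries the 2-byte SDU length plus 63 data bytes, then
	 * pdu_len grows back to 65 and the remaining 137 bytes are sent as
	 * PDUs of 65, 65 and 7 bytes.
	 */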
2627 
2628 	while (len > 0) {
2629 		if (len <= pdu_len)
2630 			pdu_len = len;
2631 
2632 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2633 		if (IS_ERR(skb)) {
2634 			__skb_queue_purge(seg_queue);
2635 			return PTR_ERR(skb);
2636 		}
2637 
2638 		__skb_queue_tail(seg_queue, skb);
2639 
2640 		len -= pdu_len;
2641 
2642 		if (sdu_len) {
2643 			sdu_len = 0;
2644 			pdu_len += L2CAP_SDULEN_SIZE;
2645 		}
2646 	}
2647 
2648 	return 0;
2649 }
2650 
2651 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2652 {
2653 	int sent = 0;
2654 
2655 	BT_DBG("chan %p", chan);
2656 
2657 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2658 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2659 		chan->tx_credits--;
2660 		sent++;
2661 	}
2662 
2663 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2664 	       skb_queue_len(&chan->tx_q));
2665 }
2666 
2667 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2668 {
2669 	struct sk_buff *skb;
2670 	int err;
2671 	struct sk_buff_head seg_queue;
2672 
2673 	if (!chan->conn)
2674 		return -ENOTCONN;
2675 
2676 	/* Connectionless channel */
2677 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2678 		skb = l2cap_create_connless_pdu(chan, msg, len);
2679 		if (IS_ERR(skb))
2680 			return PTR_ERR(skb);
2681 
2682 		/* Channel lock is released before requesting a new skb and then
2683 		 * reacquired, so we need to recheck the channel state.
2684 		 */
2685 		if (chan->state != BT_CONNECTED) {
2686 			kfree_skb(skb);
2687 			return -ENOTCONN;
2688 		}
2689 
2690 		l2cap_do_send(chan, skb);
2691 		return len;
2692 	}
2693 
2694 	switch (chan->mode) {
2695 	case L2CAP_MODE_LE_FLOWCTL:
2696 	case L2CAP_MODE_EXT_FLOWCTL:
2697 		/* Check outgoing MTU */
2698 		if (len > chan->omtu)
2699 			return -EMSGSIZE;
2700 
2701 		__skb_queue_head_init(&seg_queue);
2702 
2703 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2704 
2705 		if (chan->state != BT_CONNECTED) {
2706 			__skb_queue_purge(&seg_queue);
2707 			err = -ENOTCONN;
2708 		}
2709 
2710 		if (err)
2711 			return err;
2712 
2713 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2714 
2715 		l2cap_le_flowctl_send(chan);
2716 
2717 		if (!chan->tx_credits)
2718 			chan->ops->suspend(chan);
2719 
2720 		err = len;
2721 
2722 		break;
2723 
2724 	case L2CAP_MODE_BASIC:
2725 		/* Check outgoing MTU */
2726 		if (len > chan->omtu)
2727 			return -EMSGSIZE;
2728 
2729 		/* Create a basic PDU */
2730 		skb = l2cap_create_basic_pdu(chan, msg, len);
2731 		if (IS_ERR(skb))
2732 			return PTR_ERR(skb);
2733 
2734 		/* Channel lock is released before requesting a new skb and then
2735 		 * reacquired, so we need to recheck the channel state.
2736 		 */
2737 		if (chan->state != BT_CONNECTED) {
2738 			kfree_skb(skb);
2739 			return -ENOTCONN;
2740 		}
2741 
2742 		l2cap_do_send(chan, skb);
2743 		err = len;
2744 		break;
2745 
2746 	case L2CAP_MODE_ERTM:
2747 	case L2CAP_MODE_STREAMING:
2748 		/* Check outgoing MTU */
2749 		if (len > chan->omtu) {
2750 			err = -EMSGSIZE;
2751 			break;
2752 		}
2753 
2754 		__skb_queue_head_init(&seg_queue);
2755 
2756 		/* Do segmentation before calling in to the state machine,
2757 		 * since it's possible to block while waiting for memory
2758 		 * allocation.
2759 		 */
2760 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2761 
2762 		/* The channel could have been closed while segmenting,
2763 		 * check that it is still connected.
2764 		 */
2765 		if (chan->state != BT_CONNECTED) {
2766 			__skb_queue_purge(&seg_queue);
2767 			err = -ENOTCONN;
2768 		}
2769 
2770 		if (err)
2771 			break;
2772 
2773 		if (chan->mode == L2CAP_MODE_ERTM)
2774 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2775 		else
2776 			l2cap_streaming_send(chan, &seg_queue);
2777 
2778 		err = len;
2779 
2780 		/* If the skbs were not queued for sending, they'll still be in
2781 		 * seg_queue and need to be purged.
2782 		 */
2783 		__skb_queue_purge(&seg_queue);
2784 		break;
2785 
2786 	default:
2787 		BT_DBG("bad mode %1.1x", chan->mode);
2788 		err = -EBADFD;
2789 	}
2790 
2791 	return err;
2792 }
2793 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2794 
2795 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2796 {
2797 	struct l2cap_ctrl control;
2798 	u16 seq;
2799 
2800 	BT_DBG("chan %p, txseq %u", chan, txseq);
2801 
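	/* Request retransmission of every missing frame between the next
	 * expected sequence number and the received txseq. For example,
	 * with expected_tx_seq = 5 and txseq = 8 (and nothing buffered in
	 * srej_q), SREJ frames are sent for 5, 6 and 7 and expected_tx_seq
	 * becomes 9.
	 */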
2802 	memset(&control, 0, sizeof(control));
2803 	control.sframe = 1;
2804 	control.super = L2CAP_SUPER_SREJ;
2805 
2806 	for (seq = chan->expected_tx_seq; seq != txseq;
2807 	     seq = __next_seq(chan, seq)) {
2808 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2809 			control.reqseq = seq;
2810 			l2cap_send_sframe(chan, &control);
2811 			l2cap_seq_list_append(&chan->srej_list, seq);
2812 		}
2813 	}
2814 
2815 	chan->expected_tx_seq = __next_seq(chan, txseq);
2816 }
2817 
2818 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2819 {
2820 	struct l2cap_ctrl control;
2821 
2822 	BT_DBG("chan %p", chan);
2823 
2824 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2825 		return;
2826 
2827 	memset(&control, 0, sizeof(control));
2828 	control.sframe = 1;
2829 	control.super = L2CAP_SUPER_SREJ;
2830 	control.reqseq = chan->srej_list.tail;
2831 	l2cap_send_sframe(chan, &control);
2832 }
2833 
2834 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2835 {
2836 	struct l2cap_ctrl control;
2837 	u16 initial_head;
2838 	u16 seq;
2839 
2840 	BT_DBG("chan %p, txseq %u", chan, txseq);
2841 
2842 	memset(&control, 0, sizeof(control));
2843 	control.sframe = 1;
2844 	control.super = L2CAP_SUPER_SREJ;
2845 
2846 	/* Capture initial list head to allow only one pass through the list. */
2847 	initial_head = chan->srej_list.head;
2848 
2849 	do {
2850 		seq = l2cap_seq_list_pop(&chan->srej_list);
2851 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2852 			break;
2853 
2854 		control.reqseq = seq;
2855 		l2cap_send_sframe(chan, &control);
2856 		l2cap_seq_list_append(&chan->srej_list, seq);
2857 	} while (chan->srej_list.head != initial_head);
2858 }
2859 
2860 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2861 {
2862 	struct sk_buff *acked_skb;
2863 	u16 ackseq;
2864 
2865 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2866 
2867 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2868 		return;
2869 
2870 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2871 	       chan->expected_ack_seq, chan->unacked_frames);
2872 
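	/* reqseq acknowledges every frame up to, but not including, that
	 * sequence number. For example, expected_ack_seq = 2 and reqseq = 5
	 * removes the acked frames 2, 3 and 4 from the transmit queue.
	 */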
2873 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2874 	     ackseq = __next_seq(chan, ackseq)) {
2875 
2876 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2877 		if (acked_skb) {
2878 			skb_unlink(acked_skb, &chan->tx_q);
2879 			kfree_skb(acked_skb);
2880 			chan->unacked_frames--;
2881 		}
2882 	}
2883 
2884 	chan->expected_ack_seq = reqseq;
2885 
2886 	if (chan->unacked_frames == 0)
2887 		__clear_retrans_timer(chan);
2888 
2889 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2890 }
2891 
2892 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2893 {
2894 	BT_DBG("chan %p", chan);
2895 
2896 	chan->expected_tx_seq = chan->buffer_seq;
2897 	l2cap_seq_list_clear(&chan->srej_list);
2898 	skb_queue_purge(&chan->srej_q);
2899 	chan->rx_state = L2CAP_RX_STATE_RECV;
2900 }
2901 
2902 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2903 				struct l2cap_ctrl *control,
2904 				struct sk_buff_head *skbs, u8 event)
2905 {
2906 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2907 	       event);
2908 
2909 	switch (event) {
2910 	case L2CAP_EV_DATA_REQUEST:
2911 		if (chan->tx_send_head == NULL)
2912 			chan->tx_send_head = skb_peek(skbs);
2913 
2914 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2915 		l2cap_ertm_send(chan);
2916 		break;
2917 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2918 		BT_DBG("Enter LOCAL_BUSY");
2919 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2920 
2921 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2922 			/* The SREJ_SENT state must be aborted if we are to
2923 			 * enter the LOCAL_BUSY state.
2924 			 */
2925 			l2cap_abort_rx_srej_sent(chan);
2926 		}
2927 
2928 		l2cap_send_ack(chan);
2929 
2930 		break;
2931 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2932 		BT_DBG("Exit LOCAL_BUSY");
2933 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2934 
2935 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2936 			struct l2cap_ctrl local_control;
2937 
2938 			memset(&local_control, 0, sizeof(local_control));
2939 			local_control.sframe = 1;
2940 			local_control.super = L2CAP_SUPER_RR;
2941 			local_control.poll = 1;
2942 			local_control.reqseq = chan->buffer_seq;
2943 			l2cap_send_sframe(chan, &local_control);
2944 
2945 			chan->retry_count = 1;
2946 			__set_monitor_timer(chan);
2947 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2948 		}
2949 		break;
2950 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2951 		l2cap_process_reqseq(chan, control->reqseq);
2952 		break;
2953 	case L2CAP_EV_EXPLICIT_POLL:
2954 		l2cap_send_rr_or_rnr(chan, 1);
2955 		chan->retry_count = 1;
2956 		__set_monitor_timer(chan);
2957 		__clear_ack_timer(chan);
2958 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2959 		break;
2960 	case L2CAP_EV_RETRANS_TO:
2961 		l2cap_send_rr_or_rnr(chan, 1);
2962 		chan->retry_count = 1;
2963 		__set_monitor_timer(chan);
2964 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2965 		break;
2966 	case L2CAP_EV_RECV_FBIT:
2967 		/* Nothing to process */
2968 		break;
2969 	default:
2970 		break;
2971 	}
2972 }
2973 
2974 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2975 				  struct l2cap_ctrl *control,
2976 				  struct sk_buff_head *skbs, u8 event)
2977 {
2978 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2979 	       event);
2980 
2981 	switch (event) {
2982 	case L2CAP_EV_DATA_REQUEST:
2983 		if (chan->tx_send_head == NULL)
2984 			chan->tx_send_head = skb_peek(skbs);
2985 		/* Queue data, but don't send. */
2986 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2987 		break;
2988 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2989 		BT_DBG("Enter LOCAL_BUSY");
2990 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2991 
2992 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2993 			/* The SREJ_SENT state must be aborted if we are to
2994 			 * enter the LOCAL_BUSY state.
2995 			 */
2996 			l2cap_abort_rx_srej_sent(chan);
2997 		}
2998 
2999 		l2cap_send_ack(chan);
3000 
3001 		break;
3002 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3003 		BT_DBG("Exit LOCAL_BUSY");
3004 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3005 
3006 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3007 			struct l2cap_ctrl local_control;
3008 			memset(&local_control, 0, sizeof(local_control));
3009 			local_control.sframe = 1;
3010 			local_control.super = L2CAP_SUPER_RR;
3011 			local_control.poll = 1;
3012 			local_control.reqseq = chan->buffer_seq;
3013 			l2cap_send_sframe(chan, &local_control);
3014 
3015 			chan->retry_count = 1;
3016 			__set_monitor_timer(chan);
3017 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3018 		}
3019 		break;
3020 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3021 		l2cap_process_reqseq(chan, control->reqseq);
3022 		fallthrough;
3023 
3024 	case L2CAP_EV_RECV_FBIT:
3025 		if (control && control->final) {
3026 			__clear_monitor_timer(chan);
3027 			if (chan->unacked_frames > 0)
3028 				__set_retrans_timer(chan);
3029 			chan->retry_count = 0;
3030 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3031 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3032 		}
3033 		break;
3034 	case L2CAP_EV_EXPLICIT_POLL:
3035 		/* Ignore */
3036 		break;
3037 	case L2CAP_EV_MONITOR_TO:
3038 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3039 			l2cap_send_rr_or_rnr(chan, 1);
3040 			__set_monitor_timer(chan);
3041 			chan->retry_count++;
3042 		} else {
3043 			l2cap_send_disconn_req(chan, ECONNABORTED);
3044 		}
3045 		break;
3046 	default:
3047 		break;
3048 	}
3049 }
3050 
3051 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3052 		     struct sk_buff_head *skbs, u8 event)
3053 {
3054 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3055 	       chan, control, skbs, event, chan->tx_state);
3056 
3057 	switch (chan->tx_state) {
3058 	case L2CAP_TX_STATE_XMIT:
3059 		l2cap_tx_state_xmit(chan, control, skbs, event);
3060 		break;
3061 	case L2CAP_TX_STATE_WAIT_F:
3062 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3063 		break;
3064 	default:
3065 		/* Ignore event */
3066 		break;
3067 	}
3068 }
3069 
3070 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3071 			     struct l2cap_ctrl *control)
3072 {
3073 	BT_DBG("chan %p, control %p", chan, control);
3074 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3075 }
3076 
3077 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3078 				  struct l2cap_ctrl *control)
3079 {
3080 	BT_DBG("chan %p, control %p", chan, control);
3081 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3082 }
3083 
3084 /* Copy frame to all raw sockets on that connection */
3085 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3086 {
3087 	struct sk_buff *nskb;
3088 	struct l2cap_chan *chan;
3089 
3090 	BT_DBG("conn %p", conn);
3091 
3092 	mutex_lock(&conn->chan_lock);
3093 
3094 	list_for_each_entry(chan, &conn->chan_l, list) {
3095 		if (chan->chan_type != L2CAP_CHAN_RAW)
3096 			continue;
3097 
3098 		/* Don't send frame to the channel it came from */
3099 		if (bt_cb(skb)->l2cap.chan == chan)
3100 			continue;
3101 
3102 		nskb = skb_clone(skb, GFP_KERNEL);
3103 		if (!nskb)
3104 			continue;
3105 		if (chan->ops->recv(chan, nskb))
3106 			kfree_skb(nskb);
3107 	}
3108 
3109 	mutex_unlock(&conn->chan_lock);
3110 }
3111 
3112 /* ---- L2CAP signalling commands ---- */
3113 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3114 				       u8 ident, u16 dlen, void *data)
3115 {
3116 	struct sk_buff *skb, **frag;
3117 	struct l2cap_cmd_hdr *cmd;
3118 	struct l2cap_hdr *lh;
3119 	int len, count;
3120 
3121 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3122 	       conn, code, ident, dlen);
3123 
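	/* The resulting command frame is a basic L2CAP header (length and
	 * the signalling CID, L2CAP_CID_SIGNALING or L2CAP_CID_LE_SIGNALING)
	 * followed by the command header (code, ident, length) and dlen
	 * bytes of data, split into frag_list fragments when it exceeds
	 * the connection MTU.
	 */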
3124 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3125 		return NULL;
3126 
3127 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3128 	count = min_t(unsigned int, conn->mtu, len);
3129 
3130 	skb = bt_skb_alloc(count, GFP_KERNEL);
3131 	if (!skb)
3132 		return NULL;
3133 
3134 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3135 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3136 
3137 	if (conn->hcon->type == LE_LINK)
3138 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3139 	else
3140 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3141 
3142 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3143 	cmd->code  = code;
3144 	cmd->ident = ident;
3145 	cmd->len   = cpu_to_le16(dlen);
3146 
3147 	if (dlen) {
3148 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3149 		skb_put_data(skb, data, count);
3150 		data += count;
3151 	}
3152 
3153 	len -= skb->len;
3154 
3155 	/* Continuation fragments (no L2CAP header) */
3156 	frag = &skb_shinfo(skb)->frag_list;
3157 	while (len) {
3158 		count = min_t(unsigned int, conn->mtu, len);
3159 
3160 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3161 		if (!*frag)
3162 			goto fail;
3163 
3164 		skb_put_data(*frag, data, count);
3165 
3166 		len  -= count;
3167 		data += count;
3168 
3169 		frag = &(*frag)->next;
3170 	}
3171 
3172 	return skb;
3173 
3174 fail:
3175 	kfree_skb(skb);
3176 	return NULL;
3177 }
3178 
3179 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3180 				     unsigned long *val)
3181 {
3182 	struct l2cap_conf_opt *opt = *ptr;
3183 	int len;
3184 
3185 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3186 	*ptr += len;
3187 
3188 	*type = opt->type;
3189 	*olen = opt->len;
3190 
3191 	switch (opt->len) {
3192 	case 1:
3193 		*val = *((u8 *) opt->val);
3194 		break;
3195 
3196 	case 2:
3197 		*val = get_unaligned_le16(opt->val);
3198 		break;
3199 
3200 	case 4:
3201 		*val = get_unaligned_le32(opt->val);
3202 		break;
3203 
3204 	default:
3205 		*val = (unsigned long) opt->val;
3206 		break;
3207 	}
3208 
3209 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3210 	return len;
3211 }
3212 
3213 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3214 {
3215 	struct l2cap_conf_opt *opt = *ptr;
3216 
3217 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3218 
3219 	if (size < L2CAP_CONF_OPT_SIZE + len)
3220 		return;
3221 
3222 	opt->type = type;
3223 	opt->len  = len;
3224 
3225 	switch (len) {
3226 	case 1:
3227 		*((u8 *) opt->val)  = val;
3228 		break;
3229 
3230 	case 2:
3231 		put_unaligned_le16(val, opt->val);
3232 		break;
3233 
3234 	case 4:
3235 		put_unaligned_le32(val, opt->val);
3236 		break;
3237 
3238 	default:
3239 		memcpy(opt->val, (void *) val, len);
3240 		break;
3241 	}
3242 
3243 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3244 }
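
/* Usage sketch: configuration options are encoded and decoded as
 * type/length/value triplets. For example, the 2-byte MTU option:
 *
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
 *	...
 *	len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 */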
3245 
3246 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3247 {
3248 	struct l2cap_conf_efs efs;
3249 
3250 	switch (chan->mode) {
3251 	case L2CAP_MODE_ERTM:
3252 		efs.id		= chan->local_id;
3253 		efs.stype	= chan->local_stype;
3254 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3255 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3256 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3257 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3258 		break;
3259 
3260 	case L2CAP_MODE_STREAMING:
3261 		efs.id		= 1;
3262 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3263 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3264 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3265 		efs.acc_lat	= 0;
3266 		efs.flush_to	= 0;
3267 		break;
3268 
3269 	default:
3270 		return;
3271 	}
3272 
3273 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3274 			   (unsigned long) &efs, size);
3275 }
3276 
3277 static void l2cap_ack_timeout(struct work_struct *work)
3278 {
3279 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3280 					       ack_timer.work);
3281 	u16 frames_to_ack;
3282 
3283 	BT_DBG("chan %p", chan);
3284 
3285 	l2cap_chan_lock(chan);
3286 
3287 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3288 				     chan->last_acked_seq);
3289 
3290 	if (frames_to_ack)
3291 		l2cap_send_rr_or_rnr(chan, 0);
3292 
3293 	l2cap_chan_unlock(chan);
3294 	l2cap_chan_put(chan);
3295 }
3296 
3297 int l2cap_ertm_init(struct l2cap_chan *chan)
3298 {
3299 	int err;
3300 
3301 	chan->next_tx_seq = 0;
3302 	chan->expected_tx_seq = 0;
3303 	chan->expected_ack_seq = 0;
3304 	chan->unacked_frames = 0;
3305 	chan->buffer_seq = 0;
3306 	chan->frames_sent = 0;
3307 	chan->last_acked_seq = 0;
3308 	chan->sdu = NULL;
3309 	chan->sdu_last_frag = NULL;
3310 	chan->sdu_len = 0;
3311 
3312 	skb_queue_head_init(&chan->tx_q);
3313 
3314 	chan->local_amp_id = AMP_ID_BREDR;
3315 	chan->move_id = AMP_ID_BREDR;
3316 	chan->move_state = L2CAP_MOVE_STABLE;
3317 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3318 
3319 	if (chan->mode != L2CAP_MODE_ERTM)
3320 		return 0;
3321 
3322 	chan->rx_state = L2CAP_RX_STATE_RECV;
3323 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3324 
3325 	skb_queue_head_init(&chan->srej_q);
3326 
3327 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3328 	if (err < 0)
3329 		return err;
3330 
3331 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3332 	if (err < 0)
3333 		l2cap_seq_list_free(&chan->srej_list);
3334 
3335 	return err;
3336 }
3337 
3338 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3339 {
3340 	switch (mode) {
3341 	case L2CAP_MODE_STREAMING:
3342 	case L2CAP_MODE_ERTM:
3343 		if (l2cap_mode_supported(mode, remote_feat_mask))
3344 			return mode;
3345 		fallthrough;
3346 	default:
3347 		return L2CAP_MODE_BASIC;
3348 	}
3349 }
3350 
3351 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3352 {
3353 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3354 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3355 }
3356 
3357 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3358 {
3359 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3360 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3361 }
3362 
3363 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3364 				      struct l2cap_conf_rfc *rfc)
3365 {
3366 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3367 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3368 
3369 		/* Class 1 devices must have ERTM timeouts
3370 		 * exceeding the Link Supervision Timeout.  The
3371 		 * default Link Supervision Timeout for AMP
3372 		 * controllers is 10 seconds.
3373 		 *
3374 		 * Class 1 devices use 0xffffffff for their
3375 		 * best-effort flush timeout, so the clamping logic
3376 		 * will result in a timeout that meets the above
3377 		 * requirement.  ERTM timeouts are 16-bit values, so
3378 		 * the maximum timeout is 65.535 seconds.
3379 		 */
3380 
3381 		/* Convert timeout to milliseconds and round */
3382 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3383 
3384 		/* This is the recommended formula for class 2 devices
3385 		 * that start ERTM timers when packets are sent to the
3386 		 * controller.
3387 		 */
3388 		ertm_to = 3 * ertm_to + 500;
3389 
3390 		if (ertm_to > 0xffff)
3391 			ertm_to = 0xffff;
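		/* Example: a class 1 best-effort flush timeout of 0xffffffff
		 * (microseconds, per the conversion above) rounds up to
		 * 4294968 ms; 3 * 4294968 + 500 exceeds the 16-bit limit and
		 * is clamped to 65535 ms (65.535 seconds).
		 */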
3392 
3393 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3394 		rfc->monitor_timeout = rfc->retrans_timeout;
3395 	} else {
3396 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3397 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3398 	}
3399 }
3400 
3401 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3402 {
3403 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3404 	    __l2cap_ews_supported(chan->conn)) {
3405 		/* use extended control field */
3406 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3407 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3408 	} else {
3409 		chan->tx_win = min_t(u16, chan->tx_win,
3410 				     L2CAP_DEFAULT_TX_WINDOW);
3411 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3412 	}
3413 	chan->ack_win = chan->tx_win;
3414 }
3415 
3416 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3417 {
3418 	struct hci_conn *conn = chan->conn->hcon;
3419 
3420 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3421 
3422 	/* The 2-DH1 packet has between 2 and 56 information bytes
3423 	 * (including the 2-byte payload header)
3424 	 */
3425 	if (!(conn->pkt_type & HCI_2DH1))
3426 		chan->imtu = 54;
3427 
3428 	/* The 3-DH1 packet has between 2 and 85 information bytes
3429 	 * (including the 2-byte payload header)
3430 	 */
3431 	if (!(conn->pkt_type & HCI_3DH1))
3432 		chan->imtu = 83;
3433 
3434 	/* The 2-DH3 packet has between 2 and 369 information bytes
3435 	 * (including the 2-byte payload header)
3436 	 */
3437 	if (!(conn->pkt_type & HCI_2DH3))
3438 		chan->imtu = 367;
3439 
3440 	/* The 3-DH3 packet has between 2 and 554 information bytes
3441 	 * (including the 2-byte payload header)
3442 	 */
3443 	if (!(conn->pkt_type & HCI_3DH3))
3444 		chan->imtu = 552;
3445 
3446 	/* The 2-DH5 packet has between 2 and 681 information bytes
3447 	 * (including the 2-byte payload header)
3448 	 */
3449 	if (!(conn->pkt_type & HCI_2DH5))
3450 		chan->imtu = 679;
3451 
3452 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3453 	 * (including the 2-byte payload header)
3454 	 */
3455 	if (!(conn->pkt_type & HCI_3DH5))
3456 		chan->imtu = 1021;
3457 }
3458 
3459 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3460 {
3461 	struct l2cap_conf_req *req = data;
3462 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3463 	void *ptr = req->data;
3464 	void *endptr = data + data_size;
3465 	u16 size;
3466 
3467 	BT_DBG("chan %p", chan);
3468 
3469 	if (chan->num_conf_req || chan->num_conf_rsp)
3470 		goto done;
3471 
3472 	switch (chan->mode) {
3473 	case L2CAP_MODE_STREAMING:
3474 	case L2CAP_MODE_ERTM:
3475 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3476 			break;
3477 
3478 		if (__l2cap_efs_supported(chan->conn))
3479 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3480 
3481 		fallthrough;
3482 	default:
3483 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3484 		break;
3485 	}
3486 
3487 done:
3488 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3489 		if (!chan->imtu)
3490 			l2cap_mtu_auto(chan);
3491 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3492 				   endptr - ptr);
3493 	}
3494 
3495 	switch (chan->mode) {
3496 	case L2CAP_MODE_BASIC:
3497 		if (disable_ertm)
3498 			break;
3499 
3500 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3501 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3502 			break;
3503 
3504 		rfc.mode            = L2CAP_MODE_BASIC;
3505 		rfc.txwin_size      = 0;
3506 		rfc.max_transmit    = 0;
3507 		rfc.retrans_timeout = 0;
3508 		rfc.monitor_timeout = 0;
3509 		rfc.max_pdu_size    = 0;
3510 
3511 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3512 				   (unsigned long) &rfc, endptr - ptr);
3513 		break;
3514 
3515 	case L2CAP_MODE_ERTM:
3516 		rfc.mode            = L2CAP_MODE_ERTM;
3517 		rfc.max_transmit    = chan->max_tx;
3518 
3519 		__l2cap_set_ertm_timeouts(chan, &rfc);
3520 
3521 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3522 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3523 			     L2CAP_FCS_SIZE);
3524 		rfc.max_pdu_size = cpu_to_le16(size);
3525 
3526 		l2cap_txwin_setup(chan);
3527 
3528 		rfc.txwin_size = min_t(u16, chan->tx_win,
3529 				       L2CAP_DEFAULT_TX_WINDOW);
3530 
3531 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3532 				   (unsigned long) &rfc, endptr - ptr);
3533 
3534 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3535 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3536 
3537 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3538 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3539 					   chan->tx_win, endptr - ptr);
3540 
3541 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3542 			if (chan->fcs == L2CAP_FCS_NONE ||
3543 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3544 				chan->fcs = L2CAP_FCS_NONE;
3545 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3546 						   chan->fcs, endptr - ptr);
3547 			}
3548 		break;
3549 
3550 	case L2CAP_MODE_STREAMING:
3551 		l2cap_txwin_setup(chan);
3552 		rfc.mode            = L2CAP_MODE_STREAMING;
3553 		rfc.txwin_size      = 0;
3554 		rfc.max_transmit    = 0;
3555 		rfc.retrans_timeout = 0;
3556 		rfc.monitor_timeout = 0;
3557 
3558 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3559 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3560 			     L2CAP_FCS_SIZE);
3561 		rfc.max_pdu_size = cpu_to_le16(size);
3562 
3563 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3564 				   (unsigned long) &rfc, endptr - ptr);
3565 
3566 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3567 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3568 
3569 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3570 			if (chan->fcs == L2CAP_FCS_NONE ||
3571 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3572 				chan->fcs = L2CAP_FCS_NONE;
3573 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3574 						   chan->fcs, endptr - ptr);
3575 			}
3576 		break;
3577 	}
3578 
3579 	req->dcid  = cpu_to_le16(chan->dcid);
3580 	req->flags = cpu_to_le16(0);
3581 
3582 	return ptr - data;
3583 }
3584 
3585 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3586 {
3587 	struct l2cap_conf_rsp *rsp = data;
3588 	void *ptr = rsp->data;
3589 	void *endptr = data + data_size;
3590 	void *req = chan->conf_req;
3591 	int len = chan->conf_len;
3592 	int type, hint, olen;
3593 	unsigned long val;
3594 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3595 	struct l2cap_conf_efs efs;
3596 	u8 remote_efs = 0;
3597 	u16 mtu = L2CAP_DEFAULT_MTU;
3598 	u16 result = L2CAP_CONF_SUCCESS;
3599 	u16 size;
3600 
3601 	BT_DBG("chan %p", chan);
3602 
3603 	while (len >= L2CAP_CONF_OPT_SIZE) {
3604 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3605 		if (len < 0)
3606 			break;
3607 
3608 		hint  = type & L2CAP_CONF_HINT;
3609 		type &= L2CAP_CONF_MASK;
3610 
3611 		switch (type) {
3612 		case L2CAP_CONF_MTU:
3613 			if (olen != 2)
3614 				break;
3615 			mtu = val;
3616 			break;
3617 
3618 		case L2CAP_CONF_FLUSH_TO:
3619 			if (olen != 2)
3620 				break;
3621 			chan->flush_to = val;
3622 			break;
3623 
3624 		case L2CAP_CONF_QOS:
3625 			break;
3626 
3627 		case L2CAP_CONF_RFC:
3628 			if (olen != sizeof(rfc))
3629 				break;
3630 			memcpy(&rfc, (void *) val, olen);
3631 			break;
3632 
3633 		case L2CAP_CONF_FCS:
3634 			if (olen != 1)
3635 				break;
3636 			if (val == L2CAP_FCS_NONE)
3637 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3638 			break;
3639 
3640 		case L2CAP_CONF_EFS:
3641 			if (olen != sizeof(efs))
3642 				break;
3643 			remote_efs = 1;
3644 			memcpy(&efs, (void *) val, olen);
3645 			break;
3646 
3647 		case L2CAP_CONF_EWS:
3648 			if (olen != 2)
3649 				break;
3650 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3651 				return -ECONNREFUSED;
3652 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3653 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3654 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3655 			chan->remote_tx_win = val;
3656 			break;
3657 
3658 		default:
3659 			if (hint)
3660 				break;
3661 			result = L2CAP_CONF_UNKNOWN;
3662 			*((u8 *) ptr++) = type;
3663 			break;
3664 		}
3665 	}
3666 
3667 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3668 		goto done;
3669 
3670 	switch (chan->mode) {
3671 	case L2CAP_MODE_STREAMING:
3672 	case L2CAP_MODE_ERTM:
3673 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3674 			chan->mode = l2cap_select_mode(rfc.mode,
3675 						       chan->conn->feat_mask);
3676 			break;
3677 		}
3678 
3679 		if (remote_efs) {
3680 			if (__l2cap_efs_supported(chan->conn))
3681 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3682 			else
3683 				return -ECONNREFUSED;
3684 		}
3685 
3686 		if (chan->mode != rfc.mode)
3687 			return -ECONNREFUSED;
3688 
3689 		break;
3690 	}
3691 
3692 done:
3693 	if (chan->mode != rfc.mode) {
3694 		result = L2CAP_CONF_UNACCEPT;
3695 		rfc.mode = chan->mode;
3696 
3697 		if (chan->num_conf_rsp == 1)
3698 			return -ECONNREFUSED;
3699 
3700 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3701 				   (unsigned long) &rfc, endptr - ptr);
3702 	}
3703 
3704 	if (result == L2CAP_CONF_SUCCESS) {
3705 		/* Configure output options and let the other side know
3706 		 * which ones we don't like. */
3707 
3708 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3709 			result = L2CAP_CONF_UNACCEPT;
3710 		else {
3711 			chan->omtu = mtu;
3712 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3713 		}
3714 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3715 
3716 		if (remote_efs) {
3717 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3718 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3719 			    efs.stype != chan->local_stype) {
3720 
3721 				result = L2CAP_CONF_UNACCEPT;
3722 
3723 				if (chan->num_conf_req >= 1)
3724 					return -ECONNREFUSED;
3725 
3726 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3727 						   sizeof(efs),
3728 						   (unsigned long) &efs, endptr - ptr);
3729 			} else {
3730 				/* Send PENDING Conf Rsp */
3731 				result = L2CAP_CONF_PENDING;
3732 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3733 			}
3734 		}
3735 
3736 		switch (rfc.mode) {
3737 		case L2CAP_MODE_BASIC:
3738 			chan->fcs = L2CAP_FCS_NONE;
3739 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3740 			break;
3741 
3742 		case L2CAP_MODE_ERTM:
3743 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3744 				chan->remote_tx_win = rfc.txwin_size;
3745 			else
3746 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3747 
3748 			chan->remote_max_tx = rfc.max_transmit;
3749 
3750 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3751 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3752 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3753 			rfc.max_pdu_size = cpu_to_le16(size);
3754 			chan->remote_mps = size;
3755 
3756 			__l2cap_set_ertm_timeouts(chan, &rfc);
3757 
3758 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3759 
3760 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3761 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3762 
3763 			if (remote_efs &&
3764 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3765 				chan->remote_id = efs.id;
3766 				chan->remote_stype = efs.stype;
3767 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3768 				chan->remote_flush_to =
3769 					le32_to_cpu(efs.flush_to);
3770 				chan->remote_acc_lat =
3771 					le32_to_cpu(efs.acc_lat);
3772 				chan->remote_sdu_itime =
3773 					le32_to_cpu(efs.sdu_itime);
3774 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3775 						   sizeof(efs),
3776 						   (unsigned long) &efs, endptr - ptr);
3777 			}
3778 			break;
3779 
3780 		case L2CAP_MODE_STREAMING:
3781 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3782 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3783 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3784 			rfc.max_pdu_size = cpu_to_le16(size);
3785 			chan->remote_mps = size;
3786 
3787 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3788 
3789 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3790 					   (unsigned long) &rfc, endptr - ptr);
3791 
3792 			break;
3793 
3794 		default:
3795 			result = L2CAP_CONF_UNACCEPT;
3796 
3797 			memset(&rfc, 0, sizeof(rfc));
3798 			rfc.mode = chan->mode;
3799 		}
3800 
3801 		if (result == L2CAP_CONF_SUCCESS)
3802 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3803 	}
3804 	rsp->scid   = cpu_to_le16(chan->dcid);
3805 	rsp->result = cpu_to_le16(result);
3806 	rsp->flags  = cpu_to_le16(0);
3807 
3808 	return ptr - data;
3809 }
3810 
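/* Walk a received Configure Response and build the follow-up Configure
 * Request in 'data', adopting the values the peer adjusted (MTU, flush
 * timeout, RFC, EWS, EFS, FCS).  Returns the request length or
 * -ECONNREFUSED if the proposed mode is unacceptable.
 */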
3811 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3812 				void *data, size_t size, u16 *result)
3813 {
3814 	struct l2cap_conf_req *req = data;
3815 	void *ptr = req->data;
3816 	void *endptr = data + size;
3817 	int type, olen;
3818 	unsigned long val;
3819 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3820 	struct l2cap_conf_efs efs;
3821 
3822 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3823 
3824 	while (len >= L2CAP_CONF_OPT_SIZE) {
3825 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3826 		if (len < 0)
3827 			break;
3828 
3829 		switch (type) {
3830 		case L2CAP_CONF_MTU:
3831 			if (olen != 2)
3832 				break;
3833 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3834 				*result = L2CAP_CONF_UNACCEPT;
3835 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3836 			} else
3837 				chan->imtu = val;
3838 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3839 					   endptr - ptr);
3840 			break;
3841 
3842 		case L2CAP_CONF_FLUSH_TO:
3843 			if (olen != 2)
3844 				break;
3845 			chan->flush_to = val;
3846 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3847 					   chan->flush_to, endptr - ptr);
3848 			break;
3849 
3850 		case L2CAP_CONF_RFC:
3851 			if (olen != sizeof(rfc))
3852 				break;
3853 			memcpy(&rfc, (void *)val, olen);
3854 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3855 			    rfc.mode != chan->mode)
3856 				return -ECONNREFUSED;
3857 			chan->fcs = 0;
3858 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3859 					   (unsigned long) &rfc, endptr - ptr);
3860 			break;
3861 
3862 		case L2CAP_CONF_EWS:
3863 			if (olen != 2)
3864 				break;
3865 			chan->ack_win = min_t(u16, val, chan->ack_win);
3866 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3867 					   chan->tx_win, endptr - ptr);
3868 			break;
3869 
3870 		case L2CAP_CONF_EFS:
3871 			if (olen != sizeof(efs))
3872 				break;
3873 			memcpy(&efs, (void *)val, olen);
3874 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3875 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3876 			    efs.stype != chan->local_stype)
3877 				return -ECONNREFUSED;
3878 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3879 					   (unsigned long) &efs, endptr - ptr);
3880 			break;
3881 
3882 		case L2CAP_CONF_FCS:
3883 			if (olen != 1)
3884 				break;
3885 			if (*result == L2CAP_CONF_PENDING)
3886 				if (val == L2CAP_FCS_NONE)
3887 					set_bit(CONF_RECV_NO_FCS,
3888 						&chan->conf_state);
3889 			break;
3890 		}
3891 	}
3892 
3893 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3894 		return -ECONNREFUSED;
3895 
3896 	chan->mode = rfc.mode;
3897 
3898 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3899 		switch (rfc.mode) {
3900 		case L2CAP_MODE_ERTM:
3901 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3902 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3903 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3904 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3905 				chan->ack_win = min_t(u16, chan->ack_win,
3906 						      rfc.txwin_size);
3907 
3908 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3909 				chan->local_msdu = le16_to_cpu(efs.msdu);
3910 				chan->local_sdu_itime =
3911 					le32_to_cpu(efs.sdu_itime);
3912 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3913 				chan->local_flush_to =
3914 					le32_to_cpu(efs.flush_to);
3915 			}
3916 			break;
3917 
3918 		case L2CAP_MODE_STREAMING:
3919 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3920 		}
3921 	}
3922 
3923 	req->dcid   = cpu_to_le16(chan->dcid);
3924 	req->flags  = cpu_to_le16(0);
3925 
3926 	return ptr - data;
3927 }
3928 
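/* Build a bare Configure Response (no options) carrying the given
 * result and flags for this channel.
 */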
3929 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3930 				u16 result, u16 flags)
3931 {
3932 	struct l2cap_conf_rsp *rsp = data;
3933 	void *ptr = rsp->data;
3934 
3935 	BT_DBG("chan %p", chan);
3936 
3937 	rsp->scid   = cpu_to_le16(chan->dcid);
3938 	rsp->result = cpu_to_le16(result);
3939 	rsp->flags  = cpu_to_le16(flags);
3940 
3941 	return ptr - data;
3942 }
3943 
3944 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3945 {
3946 	struct l2cap_le_conn_rsp rsp;
3947 	struct l2cap_conn *conn = chan->conn;
3948 
3949 	BT_DBG("chan %p", chan);
3950 
3951 	rsp.dcid    = cpu_to_le16(chan->scid);
3952 	rsp.mtu     = cpu_to_le16(chan->imtu);
3953 	rsp.mps     = cpu_to_le16(chan->mps);
3954 	rsp.credits = cpu_to_le16(chan->rx_credits);
3955 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3956 
3957 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3958 		       &rsp);
3959 }
3960 
3961 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3962 {
3963 	struct {
3964 		struct l2cap_ecred_conn_rsp rsp;
3965 		__le16 dcid[5];
3966 	} __packed pdu;
3967 	struct l2cap_conn *conn = chan->conn;
3968 	u16 ident = chan->ident;
3969 	int i = 0;
3970 
3971 	if (!ident)
3972 		return;
3973 
3974 	BT_DBG("chan %p ident %d", chan, ident);
3975 
3976 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3977 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3978 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3979 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3980 
3981 	mutex_lock(&conn->chan_lock);
3982 
3983 	list_for_each_entry(chan, &conn->chan_l, list) {
3984 		if (chan->ident != ident)
3985 			continue;
3986 
3987 		/* Reset ident so only one response is sent */
3988 		chan->ident = 0;
3989 
3990 		/* Include all channels pending with the same ident */
3991 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3992 	}
3993 
3994 	mutex_unlock(&conn->chan_lock);
3995 
3996 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3997 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3998 }
3999 
4000 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4001 {
4002 	struct l2cap_conn_rsp rsp;
4003 	struct l2cap_conn *conn = chan->conn;
4004 	u8 buf[128];
4005 	u8 rsp_code;
4006 
4007 	rsp.scid   = cpu_to_le16(chan->dcid);
4008 	rsp.dcid   = cpu_to_le16(chan->scid);
4009 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4010 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4011 
4012 	if (chan->hs_hcon)
4013 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4014 	else
4015 		rsp_code = L2CAP_CONN_RSP;
4016 
4017 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4018 
4019 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4020 
4021 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4022 		return;
4023 
4024 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4025 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4026 	chan->num_conf_req++;
4027 }
4028 
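/* Extract the RFC and extended window size options from a Configure
 * Response and update the channel's ERTM/streaming parameters
 * (retransmit/monitor timeouts, MPS, ack window).
 */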
4029 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4030 {
4031 	int type, olen;
4032 	unsigned long val;
4033 	/* Use sane default values in case a misbehaving remote device
4034 	 * did not send an RFC or extended window size option.
4035 	 */
4036 	u16 txwin_ext = chan->ack_win;
4037 	struct l2cap_conf_rfc rfc = {
4038 		.mode = chan->mode,
4039 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4040 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4041 		.max_pdu_size = cpu_to_le16(chan->imtu),
4042 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4043 	};
4044 
4045 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4046 
4047 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4048 		return;
4049 
4050 	while (len >= L2CAP_CONF_OPT_SIZE) {
4051 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4052 		if (len < 0)
4053 			break;
4054 
4055 		switch (type) {
4056 		case L2CAP_CONF_RFC:
4057 			if (olen != sizeof(rfc))
4058 				break;
4059 			memcpy(&rfc, (void *)val, olen);
4060 			break;
4061 		case L2CAP_CONF_EWS:
4062 			if (olen != 2)
4063 				break;
4064 			txwin_ext = val;
4065 			break;
4066 		}
4067 	}
4068 
4069 	switch (rfc.mode) {
4070 	case L2CAP_MODE_ERTM:
4071 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4072 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4073 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4074 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4075 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4076 		else
4077 			chan->ack_win = min_t(u16, chan->ack_win,
4078 					      rfc.txwin_size);
4079 		break;
4080 	case L2CAP_MODE_STREAMING:
4081 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4082 	}
4083 }
4084 
4085 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4086 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4087 				    u8 *data)
4088 {
4089 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4090 
4091 	if (cmd_len < sizeof(*rej))
4092 		return -EPROTO;
4093 
4094 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4095 		return 0;
4096 
4097 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4098 	    cmd->ident == conn->info_ident) {
4099 		cancel_delayed_work(&conn->info_timer);
4100 
4101 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4102 		conn->info_ident = 0;
4103 
4104 		l2cap_conn_start(conn);
4105 	}
4106 
4107 	return 0;
4108 }
4109 
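/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): find a listening channel for the PSM, validate security
 * and the proposed source CID, create the new channel and send the
 * response.  May also trigger an Information Request and the first
 * Configure Request.
 */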
4110 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4111 					struct l2cap_cmd_hdr *cmd,
4112 					u8 *data, u8 rsp_code, u8 amp_id)
4113 {
4114 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4115 	struct l2cap_conn_rsp rsp;
4116 	struct l2cap_chan *chan = NULL, *pchan;
4117 	int result, status = L2CAP_CS_NO_INFO;
4118 
4119 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4120 	__le16 psm = req->psm;
4121 
4122 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4123 
4124 	/* Check if we have socket listening on psm */
4125 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4126 					 &conn->hcon->dst, ACL_LINK);
4127 	if (!pchan) {
4128 		result = L2CAP_CR_BAD_PSM;
4129 		goto sendresp;
4130 	}
4131 
4132 	mutex_lock(&conn->chan_lock);
4133 	l2cap_chan_lock(pchan);
4134 
4135 	/* Check if the ACL is secure enough (if not SDP) */
4136 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4137 	    !hci_conn_check_link_mode(conn->hcon)) {
4138 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4139 		result = L2CAP_CR_SEC_BLOCK;
4140 		goto response;
4141 	}
4142 
4143 	result = L2CAP_CR_NO_MEM;
4144 
4145 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4146 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4147 		result = L2CAP_CR_INVALID_SCID;
4148 		goto response;
4149 	}
4150 
4151 	/* Check if we already have channel with that dcid */
4152 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4153 		result = L2CAP_CR_SCID_IN_USE;
4154 		goto response;
4155 	}
4156 
4157 	chan = pchan->ops->new_connection(pchan);
4158 	if (!chan)
4159 		goto response;
4160 
4161 	/* For certain devices (ex: HID mouse), support for authentication,
4162 	 * pairing and bonding is optional. For such devices, in order to avoid
4163 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4164 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4165 	 */
4166 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4167 
4168 	bacpy(&chan->src, &conn->hcon->src);
4169 	bacpy(&chan->dst, &conn->hcon->dst);
4170 	chan->src_type = bdaddr_src_type(conn->hcon);
4171 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4172 	chan->psm  = psm;
4173 	chan->dcid = scid;
4174 	chan->local_amp_id = amp_id;
4175 
4176 	__l2cap_chan_add(conn, chan);
4177 
4178 	dcid = chan->scid;
4179 
4180 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4181 
4182 	chan->ident = cmd->ident;
4183 
4184 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4185 		if (l2cap_chan_check_security(chan, false)) {
4186 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4187 				l2cap_state_change(chan, BT_CONNECT2);
4188 				result = L2CAP_CR_PEND;
4189 				status = L2CAP_CS_AUTHOR_PEND;
4190 				chan->ops->defer(chan);
4191 			} else {
4192 				/* Force pending result for AMP controllers.
4193 				 * The connection will succeed after the
4194 				 * physical link is up.
4195 				 */
4196 				if (amp_id == AMP_ID_BREDR) {
4197 					l2cap_state_change(chan, BT_CONFIG);
4198 					result = L2CAP_CR_SUCCESS;
4199 				} else {
4200 					l2cap_state_change(chan, BT_CONNECT2);
4201 					result = L2CAP_CR_PEND;
4202 				}
4203 				status = L2CAP_CS_NO_INFO;
4204 			}
4205 		} else {
4206 			l2cap_state_change(chan, BT_CONNECT2);
4207 			result = L2CAP_CR_PEND;
4208 			status = L2CAP_CS_AUTHEN_PEND;
4209 		}
4210 	} else {
4211 		l2cap_state_change(chan, BT_CONNECT2);
4212 		result = L2CAP_CR_PEND;
4213 		status = L2CAP_CS_NO_INFO;
4214 	}
4215 
4216 response:
4217 	l2cap_chan_unlock(pchan);
4218 	mutex_unlock(&conn->chan_lock);
4219 	l2cap_chan_put(pchan);
4220 
4221 sendresp:
4222 	rsp.scid   = cpu_to_le16(scid);
4223 	rsp.dcid   = cpu_to_le16(dcid);
4224 	rsp.result = cpu_to_le16(result);
4225 	rsp.status = cpu_to_le16(status);
4226 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4227 
4228 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4229 		struct l2cap_info_req info;
4230 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4231 
4232 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4233 		conn->info_ident = l2cap_get_ident(conn);
4234 
4235 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4236 
4237 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4238 			       sizeof(info), &info);
4239 	}
4240 
4241 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4242 	    result == L2CAP_CR_SUCCESS) {
4243 		u8 buf[128];
4244 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4245 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4246 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4247 		chan->num_conf_req++;
4248 	}
4249 
4250 	return chan;
4251 }
4252 
4253 static int l2cap_connect_req(struct l2cap_conn *conn,
4254 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4255 {
4256 	struct hci_dev *hdev = conn->hcon->hdev;
4257 	struct hci_conn *hcon = conn->hcon;
4258 
4259 	if (cmd_len < sizeof(struct l2cap_conn_req))
4260 		return -EPROTO;
4261 
4262 	hci_dev_lock(hdev);
4263 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4264 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4265 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4266 	hci_dev_unlock(hdev);
4267 
4268 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4269 	return 0;
4270 }
4271 
4272 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4273 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4274 				    u8 *data)
4275 {
4276 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4277 	u16 scid, dcid, result, status;
4278 	struct l2cap_chan *chan;
4279 	u8 req[128];
4280 	int err;
4281 
4282 	if (cmd_len < sizeof(*rsp))
4283 		return -EPROTO;
4284 
4285 	scid   = __le16_to_cpu(rsp->scid);
4286 	dcid   = __le16_to_cpu(rsp->dcid);
4287 	result = __le16_to_cpu(rsp->result);
4288 	status = __le16_to_cpu(rsp->status);
4289 
4290 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4291 	       dcid, scid, result, status);
4292 
4293 	mutex_lock(&conn->chan_lock);
4294 
4295 	if (scid) {
4296 		chan = __l2cap_get_chan_by_scid(conn, scid);
4297 		if (!chan) {
4298 			err = -EBADSLT;
4299 			goto unlock;
4300 		}
4301 	} else {
4302 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4303 		if (!chan) {
4304 			err = -EBADSLT;
4305 			goto unlock;
4306 		}
4307 	}
4308 
4309 	chan = l2cap_chan_hold_unless_zero(chan);
4310 	if (!chan) {
4311 		err = -EBADSLT;
4312 		goto unlock;
4313 	}
4314 
4315 	err = 0;
4316 
4317 	l2cap_chan_lock(chan);
4318 
4319 	switch (result) {
4320 	case L2CAP_CR_SUCCESS:
4321 		l2cap_state_change(chan, BT_CONFIG);
4322 		chan->ident = 0;
4323 		chan->dcid = dcid;
4324 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4325 
4326 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4327 			break;
4328 
4329 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4330 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4331 		chan->num_conf_req++;
4332 		break;
4333 
4334 	case L2CAP_CR_PEND:
4335 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4336 		break;
4337 
4338 	default:
4339 		l2cap_chan_del(chan, ECONNREFUSED);
4340 		break;
4341 	}
4342 
4343 	l2cap_chan_unlock(chan);
4344 	l2cap_chan_put(chan);
4345 
4346 unlock:
4347 	mutex_unlock(&conn->chan_lock);
4348 
4349 	return err;
4350 }
4351 
4352 static inline void set_default_fcs(struct l2cap_chan *chan)
4353 {
4354 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4355 	 * sides request it.
4356 	 */
4357 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4358 		chan->fcs = L2CAP_FCS_NONE;
4359 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4360 		chan->fcs = L2CAP_FCS_CRC16;
4361 }
4362 
4363 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4364 				    u8 ident, u16 flags)
4365 {
4366 	struct l2cap_conn *conn = chan->conn;
4367 
4368 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4369 	       flags);
4370 
4371 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4372 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4373 
4374 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4375 		       l2cap_build_conf_rsp(chan, data,
4376 					    L2CAP_CONF_SUCCESS, flags), data);
4377 }
4378 
4379 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4380 				   u16 scid, u16 dcid)
4381 {
4382 	struct l2cap_cmd_rej_cid rej;
4383 
4384 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4385 	rej.scid = __cpu_to_le16(scid);
4386 	rej.dcid = __cpu_to_le16(dcid);
4387 
4388 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4389 }
4390 
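/* Handle a Configure Request: accumulate option data across
 * continuation fragments in chan->conf_req, then parse the complete
 * request and send the Configure Response.  Completes channel setup
 * once both directions are configured.
 */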
4391 static inline int l2cap_config_req(struct l2cap_conn *conn,
4392 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4393 				   u8 *data)
4394 {
4395 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4396 	u16 dcid, flags;
4397 	u8 rsp[64];
4398 	struct l2cap_chan *chan;
4399 	int len, err = 0;
4400 
4401 	if (cmd_len < sizeof(*req))
4402 		return -EPROTO;
4403 
4404 	dcid  = __le16_to_cpu(req->dcid);
4405 	flags = __le16_to_cpu(req->flags);
4406 
4407 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4408 
4409 	chan = l2cap_get_chan_by_scid(conn, dcid);
4410 	if (!chan) {
4411 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4412 		return 0;
4413 	}
4414 
4415 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4416 	    chan->state != BT_CONNECTED) {
4417 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4418 				       chan->dcid);
4419 		goto unlock;
4420 	}
4421 
4422 	/* Reject if config buffer is too small. */
4423 	len = cmd_len - sizeof(*req);
4424 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4425 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4426 			       l2cap_build_conf_rsp(chan, rsp,
4427 			       L2CAP_CONF_REJECT, flags), rsp);
4428 		goto unlock;
4429 	}
4430 
4431 	/* Store config. */
4432 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4433 	chan->conf_len += len;
4434 
4435 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4436 		/* Incomplete config. Send empty response. */
4437 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4438 			       l2cap_build_conf_rsp(chan, rsp,
4439 			       L2CAP_CONF_SUCCESS, flags), rsp);
4440 		goto unlock;
4441 	}
4442 
4443 	/* Complete config. */
4444 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4445 	if (len < 0) {
4446 		l2cap_send_disconn_req(chan, ECONNRESET);
4447 		goto unlock;
4448 	}
4449 
4450 	chan->ident = cmd->ident;
4451 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4452 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4453 		chan->num_conf_rsp++;
4454 
4455 	/* Reset config buffer. */
4456 	chan->conf_len = 0;
4457 
4458 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4459 		goto unlock;
4460 
4461 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4462 		set_default_fcs(chan);
4463 
4464 		if (chan->mode == L2CAP_MODE_ERTM ||
4465 		    chan->mode == L2CAP_MODE_STREAMING)
4466 			err = l2cap_ertm_init(chan);
4467 
4468 		if (err < 0)
4469 			l2cap_send_disconn_req(chan, -err);
4470 		else
4471 			l2cap_chan_ready(chan);
4472 
4473 		goto unlock;
4474 	}
4475 
4476 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4477 		u8 buf[64];
4478 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4479 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4480 		chan->num_conf_req++;
4481 	}
4482 
4483 	/* Got Conf Rsp PENDING from remote side and assume we sent
4484 	   Conf Rsp PENDING in the code above */
4485 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4486 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4487 
4488 		/* check compatibility */
4489 
4490 		/* Send rsp for BR/EDR channel */
4491 		if (!chan->hs_hcon)
4492 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4493 		else
4494 			chan->ident = cmd->ident;
4495 	}
4496 
4497 unlock:
4498 	l2cap_chan_unlock(chan);
4499 	l2cap_chan_put(chan);
4500 	return err;
4501 }
4502 
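/* Handle a Configure Response: record the negotiated options on
 * success, handle a PENDING result, retry with an adjusted Configure
 * Request when options are rejected, and disconnect the channel on
 * unrecoverable results.
 */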
4503 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4504 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4505 				   u8 *data)
4506 {
4507 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4508 	u16 scid, flags, result;
4509 	struct l2cap_chan *chan;
4510 	int len = cmd_len - sizeof(*rsp);
4511 	int err = 0;
4512 
4513 	if (cmd_len < sizeof(*rsp))
4514 		return -EPROTO;
4515 
4516 	scid   = __le16_to_cpu(rsp->scid);
4517 	flags  = __le16_to_cpu(rsp->flags);
4518 	result = __le16_to_cpu(rsp->result);
4519 
4520 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4521 	       result, len);
4522 
4523 	chan = l2cap_get_chan_by_scid(conn, scid);
4524 	if (!chan)
4525 		return 0;
4526 
4527 	switch (result) {
4528 	case L2CAP_CONF_SUCCESS:
4529 		l2cap_conf_rfc_get(chan, rsp->data, len);
4530 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4531 		break;
4532 
4533 	case L2CAP_CONF_PENDING:
4534 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4535 
4536 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4537 			char buf[64];
4538 
4539 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4540 						   buf, sizeof(buf), &result);
4541 			if (len < 0) {
4542 				l2cap_send_disconn_req(chan, ECONNRESET);
4543 				goto done;
4544 			}
4545 
4546 			if (!chan->hs_hcon) {
4547 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4548 							0);
4549 			} else {
4550 				if (l2cap_check_efs(chan)) {
4551 					amp_create_logical_link(chan);
4552 					chan->ident = cmd->ident;
4553 				}
4554 			}
4555 		}
4556 		goto done;
4557 
4558 	case L2CAP_CONF_UNACCEPT:
4559 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4560 			char req[64];
4561 
4562 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4563 				l2cap_send_disconn_req(chan, ECONNRESET);
4564 				goto done;
4565 			}
4566 
4567 			/* throw out any old stored conf requests */
4568 			result = L2CAP_CONF_SUCCESS;
4569 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4570 						   req, sizeof(req), &result);
4571 			if (len < 0) {
4572 				l2cap_send_disconn_req(chan, ECONNRESET);
4573 				goto done;
4574 			}
4575 
4576 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4577 				       L2CAP_CONF_REQ, len, req);
4578 			chan->num_conf_req++;
4579 			if (result != L2CAP_CONF_SUCCESS)
4580 				goto done;
4581 			break;
4582 		}
4583 		fallthrough;
4584 
4585 	default:
4586 		l2cap_chan_set_err(chan, ECONNRESET);
4587 
4588 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4589 		l2cap_send_disconn_req(chan, ECONNRESET);
4590 		goto done;
4591 	}
4592 
4593 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4594 		goto done;
4595 
4596 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4597 
4598 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4599 		set_default_fcs(chan);
4600 
4601 		if (chan->mode == L2CAP_MODE_ERTM ||
4602 		    chan->mode == L2CAP_MODE_STREAMING)
4603 			err = l2cap_ertm_init(chan);
4604 
4605 		if (err < 0)
4606 			l2cap_send_disconn_req(chan, -err);
4607 		else
4608 			l2cap_chan_ready(chan);
4609 	}
4610 
4611 done:
4612 	l2cap_chan_unlock(chan);
4613 	l2cap_chan_put(chan);
4614 	return err;
4615 }
4616 
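/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear down the matching channel.
 */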
4617 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4618 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4619 				       u8 *data)
4620 {
4621 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4622 	struct l2cap_disconn_rsp rsp;
4623 	u16 dcid, scid;
4624 	struct l2cap_chan *chan;
4625 
4626 	if (cmd_len != sizeof(*req))
4627 		return -EPROTO;
4628 
4629 	scid = __le16_to_cpu(req->scid);
4630 	dcid = __le16_to_cpu(req->dcid);
4631 
4632 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4633 
4634 	mutex_lock(&conn->chan_lock);
4635 
4636 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4637 	if (!chan) {
4638 		mutex_unlock(&conn->chan_lock);
4639 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4640 		return 0;
4641 	}
4642 
4643 	l2cap_chan_hold(chan);
4644 	l2cap_chan_lock(chan);
4645 
4646 	rsp.dcid = cpu_to_le16(chan->scid);
4647 	rsp.scid = cpu_to_le16(chan->dcid);
4648 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4649 
4650 	chan->ops->set_shutdown(chan);
4651 
4652 	l2cap_chan_del(chan, ECONNRESET);
4653 
4654 	chan->ops->close(chan);
4655 
4656 	l2cap_chan_unlock(chan);
4657 	l2cap_chan_put(chan);
4658 
4659 	mutex_unlock(&conn->chan_lock);
4660 
4661 	return 0;
4662 }
4663 
4664 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4665 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4666 				       u8 *data)
4667 {
4668 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4669 	u16 dcid, scid;
4670 	struct l2cap_chan *chan;
4671 
4672 	if (cmd_len != sizeof(*rsp))
4673 		return -EPROTO;
4674 
4675 	scid = __le16_to_cpu(rsp->scid);
4676 	dcid = __le16_to_cpu(rsp->dcid);
4677 
4678 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4679 
4680 	mutex_lock(&conn->chan_lock);
4681 
4682 	chan = __l2cap_get_chan_by_scid(conn, scid);
4683 	if (!chan) {
4684 		mutex_unlock(&conn->chan_lock);
4685 		return 0;
4686 	}
4687 
4688 	l2cap_chan_hold(chan);
4689 	l2cap_chan_lock(chan);
4690 
4691 	if (chan->state != BT_DISCONN) {
4692 		l2cap_chan_unlock(chan);
4693 		l2cap_chan_put(chan);
4694 		mutex_unlock(&conn->chan_lock);
4695 		return 0;
4696 	}
4697 
4698 	l2cap_chan_del(chan, 0);
4699 
4700 	chan->ops->close(chan);
4701 
4702 	l2cap_chan_unlock(chan);
4703 	l2cap_chan_put(chan);
4704 
4705 	mutex_unlock(&conn->chan_lock);
4706 
4707 	return 0;
4708 }
4709 
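/* Handle an Information Request: answer with the local feature mask or
 * fixed channel bitmap, or NOTSUPP for unknown info types.
 */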
4710 static inline int l2cap_information_req(struct l2cap_conn *conn,
4711 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4712 					u8 *data)
4713 {
4714 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4715 	u16 type;
4716 
4717 	if (cmd_len != sizeof(*req))
4718 		return -EPROTO;
4719 
4720 	type = __le16_to_cpu(req->type);
4721 
4722 	BT_DBG("type 0x%4.4x", type);
4723 
4724 	if (type == L2CAP_IT_FEAT_MASK) {
4725 		u8 buf[8];
4726 		u32 feat_mask = l2cap_feat_mask;
4727 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4728 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4729 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4730 		if (!disable_ertm)
4731 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4732 				| L2CAP_FEAT_FCS;
4733 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4734 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4735 				| L2CAP_FEAT_EXT_WINDOW;
4736 
4737 		put_unaligned_le32(feat_mask, rsp->data);
4738 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4739 			       buf);
4740 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4741 		u8 buf[12];
4742 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4743 
4744 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4745 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4746 		rsp->data[0] = conn->local_fixed_chan;
4747 		memset(rsp->data + 1, 0, 7);
4748 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4749 			       buf);
4750 	} else {
4751 		struct l2cap_info_rsp rsp;
4752 		rsp.type   = cpu_to_le16(type);
4753 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4754 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4755 			       &rsp);
4756 	}
4757 
4758 	return 0;
4759 }
4760 
4761 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4762 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4763 					u8 *data)
4764 {
4765 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4766 	u16 type, result;
4767 
4768 	if (cmd_len < sizeof(*rsp))
4769 		return -EPROTO;
4770 
4771 	type   = __le16_to_cpu(rsp->type);
4772 	result = __le16_to_cpu(rsp->result);
4773 
4774 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4775 
4776 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4777 	if (cmd->ident != conn->info_ident ||
4778 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4779 		return 0;
4780 
4781 	cancel_delayed_work(&conn->info_timer);
4782 
4783 	if (result != L2CAP_IR_SUCCESS) {
4784 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4785 		conn->info_ident = 0;
4786 
4787 		l2cap_conn_start(conn);
4788 
4789 		return 0;
4790 	}
4791 
4792 	switch (type) {
4793 	case L2CAP_IT_FEAT_MASK:
4794 		conn->feat_mask = get_unaligned_le32(rsp->data);
4795 
4796 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4797 			struct l2cap_info_req req;
4798 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4799 
4800 			conn->info_ident = l2cap_get_ident(conn);
4801 
4802 			l2cap_send_cmd(conn, conn->info_ident,
4803 				       L2CAP_INFO_REQ, sizeof(req), &req);
4804 		} else {
4805 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4806 			conn->info_ident = 0;
4807 
4808 			l2cap_conn_start(conn);
4809 		}
4810 		break;
4811 
4812 	case L2CAP_IT_FIXED_CHAN:
4813 		conn->remote_fixed_chan = rsp->data[0];
4814 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4815 		conn->info_ident = 0;
4816 
4817 		l2cap_conn_start(conn);
4818 		break;
4819 	}
4820 
4821 	return 0;
4822 }
4823 
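/* Handle an AMP Create Channel Request: for controller id 0 fall back
 * to a plain BR/EDR connection, otherwise validate the requested AMP
 * controller and bind the new channel to its high-speed link.
 */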
4824 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4825 				    struct l2cap_cmd_hdr *cmd,
4826 				    u16 cmd_len, void *data)
4827 {
4828 	struct l2cap_create_chan_req *req = data;
4829 	struct l2cap_create_chan_rsp rsp;
4830 	struct l2cap_chan *chan;
4831 	struct hci_dev *hdev;
4832 	u16 psm, scid;
4833 
4834 	if (cmd_len != sizeof(*req))
4835 		return -EPROTO;
4836 
4837 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4838 		return -EINVAL;
4839 
4840 	psm = le16_to_cpu(req->psm);
4841 	scid = le16_to_cpu(req->scid);
4842 
4843 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4844 
4845 	/* For controller id 0 make BR/EDR connection */
4846 	if (req->amp_id == AMP_ID_BREDR) {
4847 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4848 			      req->amp_id);
4849 		return 0;
4850 	}
4851 
4852 	/* Validate AMP controller id */
4853 	hdev = hci_dev_get(req->amp_id);
4854 	if (!hdev)
4855 		goto error;
4856 
4857 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4858 		hci_dev_put(hdev);
4859 		goto error;
4860 	}
4861 
4862 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4863 			     req->amp_id);
4864 	if (chan) {
4865 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4866 		struct hci_conn *hs_hcon;
4867 
4868 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4869 						  &conn->hcon->dst);
4870 		if (!hs_hcon) {
4871 			hci_dev_put(hdev);
4872 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4873 					       chan->dcid);
4874 			return 0;
4875 		}
4876 
4877 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4878 
4879 		mgr->bredr_chan = chan;
4880 		chan->hs_hcon = hs_hcon;
4881 		chan->fcs = L2CAP_FCS_NONE;
4882 		conn->mtu = hdev->block_mtu;
4883 	}
4884 
4885 	hci_dev_put(hdev);
4886 
4887 	return 0;
4888 
4889 error:
4890 	rsp.dcid = 0;
4891 	rsp.scid = cpu_to_le16(scid);
4892 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4893 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4894 
4895 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4896 		       sizeof(rsp), &rsp);
4897 
4898 	return 0;
4899 }
4900 
4901 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4902 {
4903 	struct l2cap_move_chan_req req;
4904 	u8 ident;
4905 
4906 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4907 
4908 	ident = l2cap_get_ident(chan->conn);
4909 	chan->ident = ident;
4910 
4911 	req.icid = cpu_to_le16(chan->scid);
4912 	req.dest_amp_id = dest_amp_id;
4913 
4914 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4915 		       &req);
4916 
4917 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4918 }
4919 
4920 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4921 {
4922 	struct l2cap_move_chan_rsp rsp;
4923 
4924 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4925 
4926 	rsp.icid = cpu_to_le16(chan->dcid);
4927 	rsp.result = cpu_to_le16(result);
4928 
4929 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4930 		       sizeof(rsp), &rsp);
4931 }
4932 
4933 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4934 {
4935 	struct l2cap_move_chan_cfm cfm;
4936 
4937 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4938 
4939 	chan->ident = l2cap_get_ident(chan->conn);
4940 
4941 	cfm.icid = cpu_to_le16(chan->scid);
4942 	cfm.result = cpu_to_le16(result);
4943 
4944 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4945 		       sizeof(cfm), &cfm);
4946 
4947 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4948 }
4949 
4950 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4951 {
4952 	struct l2cap_move_chan_cfm cfm;
4953 
4954 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4955 
4956 	cfm.icid = cpu_to_le16(icid);
4957 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4958 
4959 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4960 		       sizeof(cfm), &cfm);
4961 }
4962 
4963 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4964 					 u16 icid)
4965 {
4966 	struct l2cap_move_chan_cfm_rsp rsp;
4967 
4968 	BT_DBG("icid 0x%4.4x", icid);
4969 
4970 	rsp.icid = cpu_to_le16(icid);
4971 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4972 }
4973 
4974 static void __release_logical_link(struct l2cap_chan *chan)
4975 {
4976 	chan->hs_hchan = NULL;
4977 	chan->hs_hcon = NULL;
4978 
4979 	/* Placeholder - release the logical link */
4980 }
4981 
4982 static void l2cap_logical_fail(struct l2cap_chan *chan)
4983 {
4984 	/* Logical link setup failed */
4985 	if (chan->state != BT_CONNECTED) {
4986 		/* Create channel failure, disconnect */
4987 		l2cap_send_disconn_req(chan, ECONNRESET);
4988 		return;
4989 	}
4990 
4991 	switch (chan->move_role) {
4992 	case L2CAP_MOVE_ROLE_RESPONDER:
4993 		l2cap_move_done(chan);
4994 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4995 		break;
4996 	case L2CAP_MOVE_ROLE_INITIATOR:
4997 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4998 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4999 			/* Remote has only sent pending or
5000 			 * success responses, clean up
5001 			 */
5002 			l2cap_move_done(chan);
5003 		}
5004 
5005 		/* Other amp move states imply that the move
5006 		 * has already aborted
5007 		 */
5008 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5009 		break;
5010 	}
5011 }
5012 
5013 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5014 					struct hci_chan *hchan)
5015 {
5016 	struct l2cap_conf_rsp rsp;
5017 
5018 	chan->hs_hchan = hchan;
5019 	chan->hs_hcon->l2cap_data = chan->conn;
5020 
5021 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5022 
5023 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5024 		int err;
5025 
5026 		set_default_fcs(chan);
5027 
5028 		err = l2cap_ertm_init(chan);
5029 		if (err < 0)
5030 			l2cap_send_disconn_req(chan, -err);
5031 		else
5032 			l2cap_chan_ready(chan);
5033 	}
5034 }
5035 
5036 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5037 				      struct hci_chan *hchan)
5038 {
5039 	chan->hs_hcon = hchan->conn;
5040 	chan->hs_hcon->l2cap_data = chan->conn;
5041 
5042 	BT_DBG("move_state %d", chan->move_state);
5043 
5044 	switch (chan->move_state) {
5045 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5046 		/* Move confirm will be sent after a success
5047 		 * response is received
5048 		 */
5049 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5050 		break;
5051 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5052 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5053 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5054 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5055 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5056 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5057 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5058 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5059 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5060 		}
5061 		break;
5062 	default:
5063 		/* Move was not in expected state, free the channel */
5064 		__release_logical_link(chan);
5065 
5066 		chan->move_state = L2CAP_MOVE_STABLE;
5067 	}
5068 }
5069 
5070 /* Call with chan locked */
5071 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5072 		       u8 status)
5073 {
5074 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5075 
5076 	if (status) {
5077 		l2cap_logical_fail(chan);
5078 		__release_logical_link(chan);
5079 		return;
5080 	}
5081 
5082 	if (chan->state != BT_CONNECTED) {
5083 		/* Ignore logical link if channel is on BR/EDR */
5084 		if (chan->local_amp_id != AMP_ID_BREDR)
5085 			l2cap_logical_finish_create(chan, hchan);
5086 	} else {
5087 		l2cap_logical_finish_move(chan, hchan);
5088 	}
5089 }
5090 
5091 void l2cap_move_start(struct l2cap_chan *chan)
5092 {
5093 	BT_DBG("chan %p", chan);
5094 
5095 	if (chan->local_amp_id == AMP_ID_BREDR) {
5096 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5097 			return;
5098 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5099 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5100 		/* Placeholder - start physical link setup */
5101 	} else {
5102 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5103 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5104 		chan->move_id = 0;
5105 		l2cap_move_setup(chan);
5106 		l2cap_send_move_chan_req(chan, 0);
5107 	}
5108 }
5109 
5110 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5111 			    u8 local_amp_id, u8 remote_amp_id)
5112 {
5113 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5114 	       local_amp_id, remote_amp_id);
5115 
5116 	chan->fcs = L2CAP_FCS_NONE;
5117 
5118 	/* Outgoing channel on AMP */
5119 	if (chan->state == BT_CONNECT) {
5120 		if (result == L2CAP_CR_SUCCESS) {
5121 			chan->local_amp_id = local_amp_id;
5122 			l2cap_send_create_chan_req(chan, remote_amp_id);
5123 		} else {
5124 			/* Revert to BR/EDR connect */
5125 			l2cap_send_conn_req(chan);
5126 		}
5127 
5128 		return;
5129 	}
5130 
5131 	/* Incoming channel on AMP */
5132 	if (__l2cap_no_conn_pending(chan)) {
5133 		struct l2cap_conn_rsp rsp;
5134 		char buf[128];
5135 		rsp.scid = cpu_to_le16(chan->dcid);
5136 		rsp.dcid = cpu_to_le16(chan->scid);
5137 
5138 		if (result == L2CAP_CR_SUCCESS) {
5139 			/* Send successful response */
5140 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5141 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5142 		} else {
5143 			/* Send negative response */
5144 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5145 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5146 		}
5147 
5148 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5149 			       sizeof(rsp), &rsp);
5150 
5151 		if (result == L2CAP_CR_SUCCESS) {
5152 			l2cap_state_change(chan, BT_CONFIG);
5153 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5154 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5155 				       L2CAP_CONF_REQ,
5156 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5157 			chan->num_conf_req++;
5158 		}
5159 	}
5160 }
5161 
5162 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5163 				   u8 remote_amp_id)
5164 {
5165 	l2cap_move_setup(chan);
5166 	chan->move_id = local_amp_id;
5167 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5168 
5169 	l2cap_send_move_chan_req(chan, remote_amp_id);
5170 }
5171 
5172 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5173 {
5174 	struct hci_chan *hchan = NULL;
5175 
5176 	/* Placeholder - get hci_chan for logical link */
5177 
5178 	if (hchan) {
5179 		if (hchan->state == BT_CONNECTED) {
5180 			/* Logical link is ready to go */
5181 			chan->hs_hcon = hchan->conn;
5182 			chan->hs_hcon->l2cap_data = chan->conn;
5183 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5184 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5185 
5186 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5187 		} else {
5188 			/* Wait for logical link to be ready */
5189 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5190 		}
5191 	} else {
5192 		/* Logical link not available */
5193 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5194 	}
5195 }
5196 
5197 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5198 {
5199 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5200 		u8 rsp_result;
5201 		if (result == -EINVAL)
5202 			rsp_result = L2CAP_MR_BAD_ID;
5203 		else
5204 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5205 
5206 		l2cap_send_move_chan_rsp(chan, rsp_result);
5207 	}
5208 
5209 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5210 	chan->move_state = L2CAP_MOVE_STABLE;
5211 
5212 	/* Restart data transmission */
5213 	l2cap_ertm_send(chan);
5214 }
5215 
5216 /* Invoke with locked chan */
5217 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5218 {
5219 	u8 local_amp_id = chan->local_amp_id;
5220 	u8 remote_amp_id = chan->remote_amp_id;
5221 
5222 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5223 	       chan, result, local_amp_id, remote_amp_id);
5224 
5225 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5226 		return;
5227 
5228 	if (chan->state != BT_CONNECTED) {
5229 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5230 	} else if (result != L2CAP_MR_SUCCESS) {
5231 		l2cap_do_move_cancel(chan, result);
5232 	} else {
5233 		switch (chan->move_role) {
5234 		case L2CAP_MOVE_ROLE_INITIATOR:
5235 			l2cap_do_move_initiate(chan, local_amp_id,
5236 					       remote_amp_id);
5237 			break;
5238 		case L2CAP_MOVE_ROLE_RESPONDER:
5239 			l2cap_do_move_respond(chan, result);
5240 			break;
5241 		default:
5242 			l2cap_do_move_cancel(chan, result);
5243 			break;
5244 		}
5245 	}
5246 }
5247 
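/* Handle an AMP Move Channel Request: validate the channel and the
 * destination controller, detect move collisions, and reply with
 * success, pending or an error result.
 */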
5248 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5249 					 struct l2cap_cmd_hdr *cmd,
5250 					 u16 cmd_len, void *data)
5251 {
5252 	struct l2cap_move_chan_req *req = data;
5253 	struct l2cap_move_chan_rsp rsp;
5254 	struct l2cap_chan *chan;
5255 	u16 icid = 0;
5256 	u16 result = L2CAP_MR_NOT_ALLOWED;
5257 
5258 	if (cmd_len != sizeof(*req))
5259 		return -EPROTO;
5260 
5261 	icid = le16_to_cpu(req->icid);
5262 
5263 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5264 
5265 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5266 		return -EINVAL;
5267 
5268 	chan = l2cap_get_chan_by_dcid(conn, icid);
5269 	if (!chan) {
5270 		rsp.icid = cpu_to_le16(icid);
5271 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5272 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5273 			       sizeof(rsp), &rsp);
5274 		return 0;
5275 	}
5276 
5277 	chan->ident = cmd->ident;
5278 
5279 	if (chan->scid < L2CAP_CID_DYN_START ||
5280 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5281 	    (chan->mode != L2CAP_MODE_ERTM &&
5282 	     chan->mode != L2CAP_MODE_STREAMING)) {
5283 		result = L2CAP_MR_NOT_ALLOWED;
5284 		goto send_move_response;
5285 	}
5286 
5287 	if (chan->local_amp_id == req->dest_amp_id) {
5288 		result = L2CAP_MR_SAME_ID;
5289 		goto send_move_response;
5290 	}
5291 
5292 	if (req->dest_amp_id != AMP_ID_BREDR) {
5293 		struct hci_dev *hdev;
5294 		hdev = hci_dev_get(req->dest_amp_id);
5295 		if (!hdev || hdev->dev_type != HCI_AMP ||
5296 		    !test_bit(HCI_UP, &hdev->flags)) {
5297 			if (hdev)
5298 				hci_dev_put(hdev);
5299 
5300 			result = L2CAP_MR_BAD_ID;
5301 			goto send_move_response;
5302 		}
5303 		hci_dev_put(hdev);
5304 	}
5305 
5306 	/* Detect a move collision.  Only send a collision response
5307 	 * if this side has "lost", otherwise proceed with the move.
5308 	 * The winner has the larger bd_addr.
5309 	 */
5310 	if ((__chan_is_moving(chan) ||
5311 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5312 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5313 		result = L2CAP_MR_COLLISION;
5314 		goto send_move_response;
5315 	}
5316 
5317 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5318 	l2cap_move_setup(chan);
5319 	chan->move_id = req->dest_amp_id;
5320 
5321 	if (req->dest_amp_id == AMP_ID_BREDR) {
5322 		/* Moving to BR/EDR */
5323 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5324 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5325 			result = L2CAP_MR_PEND;
5326 		} else {
5327 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5328 			result = L2CAP_MR_SUCCESS;
5329 		}
5330 	} else {
5331 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5332 		/* Placeholder - uncomment when amp functions are available */
5333 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5334 		result = L2CAP_MR_PEND;
5335 	}
5336 
5337 send_move_response:
5338 	l2cap_send_move_chan_rsp(chan, result);
5339 
5340 	l2cap_chan_unlock(chan);
5341 	l2cap_chan_put(chan);
5342 
5343 	return 0;
5344 }
5345 
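/* Continue a channel move after a success or pending Move Channel
 * Response: advance the move state machine, sending a Move Channel
 * Confirmation when the logical link allows it.
 */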
5346 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5347 {
5348 	struct l2cap_chan *chan;
5349 	struct hci_chan *hchan = NULL;
5350 
5351 	chan = l2cap_get_chan_by_scid(conn, icid);
5352 	if (!chan) {
5353 		l2cap_send_move_chan_cfm_icid(conn, icid);
5354 		return;
5355 	}
5356 
5357 	__clear_chan_timer(chan);
5358 	if (result == L2CAP_MR_PEND)
5359 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5360 
5361 	switch (chan->move_state) {
5362 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5363 		/* Move confirm will be sent when logical link
5364 		 * is complete.
5365 		 */
5366 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5367 		break;
5368 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5369 		if (result == L2CAP_MR_PEND) {
5370 			break;
5371 		} else if (test_bit(CONN_LOCAL_BUSY,
5372 				    &chan->conn_state)) {
5373 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5374 		} else {
5375 			/* Logical link is up or moving to BR/EDR,
5376 			 * proceed with move
5377 			 */
5378 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5379 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5380 		}
5381 		break;
5382 	case L2CAP_MOVE_WAIT_RSP:
5383 		/* Moving to AMP */
5384 		if (result == L2CAP_MR_SUCCESS) {
5385 			/* Remote is ready, send confirm immediately
5386 			 * after logical link is ready
5387 			 */
5388 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5389 		} else {
5390 			/* Both logical link and move success
5391 			 * are required to confirm
5392 			 */
5393 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5394 		}
5395 
5396 		/* Placeholder - get hci_chan for logical link */
5397 		if (!hchan) {
5398 			/* Logical link not available */
5399 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5400 			break;
5401 		}
5402 
5403 		/* If the logical link is not yet connected, do not
5404 		 * send confirmation.
5405 		 */
5406 		if (hchan->state != BT_CONNECTED)
5407 			break;
5408 
5409 		/* Logical link is already ready to go */
5410 
5411 		chan->hs_hcon = hchan->conn;
5412 		chan->hs_hcon->l2cap_data = chan->conn;
5413 
5414 		if (result == L2CAP_MR_SUCCESS) {
5415 			/* Can confirm now */
5416 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5417 		} else {
5418 			/* Now only need move success
5419 			 * to confirm
5420 			 */
5421 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5422 		}
5423 
5424 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5425 		break;
5426 	default:
5427 		/* Any other amp move state means the move failed. */
5428 		chan->move_id = chan->local_amp_id;
5429 		l2cap_move_done(chan);
5430 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5431 	}
5432 
5433 	l2cap_chan_unlock(chan);
5434 	l2cap_chan_put(chan);
5435 }
5436 
5437 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5438 			    u16 result)
5439 {
5440 	struct l2cap_chan *chan;
5441 
5442 	chan = l2cap_get_chan_by_ident(conn, ident);
5443 	if (!chan) {
5444 		/* Could not locate channel, icid is best guess */
5445 		l2cap_send_move_chan_cfm_icid(conn, icid);
5446 		return;
5447 	}
5448 
5449 	__clear_chan_timer(chan);
5450 
5451 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5452 		if (result == L2CAP_MR_COLLISION) {
5453 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5454 		} else {
5455 			/* Cleanup - cancel move */
5456 			chan->move_id = chan->local_amp_id;
5457 			l2cap_move_done(chan);
5458 		}
5459 	}
5460 
5461 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5462 
5463 	l2cap_chan_unlock(chan);
5464 	l2cap_chan_put(chan);
5465 }
5466 
5467 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5468 				  struct l2cap_cmd_hdr *cmd,
5469 				  u16 cmd_len, void *data)
5470 {
5471 	struct l2cap_move_chan_rsp *rsp = data;
5472 	u16 icid, result;
5473 
5474 	if (cmd_len != sizeof(*rsp))
5475 		return -EPROTO;
5476 
5477 	icid = le16_to_cpu(rsp->icid);
5478 	result = le16_to_cpu(rsp->result);
5479 
5480 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5481 
5482 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5483 		l2cap_move_continue(conn, icid, result);
5484 	else
5485 		l2cap_move_fail(conn, cmd->ident, icid, result);
5486 
5487 	return 0;
5488 }
5489 
5490 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5491 				      struct l2cap_cmd_hdr *cmd,
5492 				      u16 cmd_len, void *data)
5493 {
5494 	struct l2cap_move_chan_cfm *cfm = data;
5495 	struct l2cap_chan *chan;
5496 	u16 icid, result;
5497 
5498 	if (cmd_len != sizeof(*cfm))
5499 		return -EPROTO;
5500 
5501 	icid = le16_to_cpu(cfm->icid);
5502 	result = le16_to_cpu(cfm->result);
5503 
5504 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5505 
5506 	chan = l2cap_get_chan_by_dcid(conn, icid);
5507 	if (!chan) {
5508 		/* Spec requires a response even if the icid was not found */
5509 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5510 		return 0;
5511 	}
5512 
5513 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5514 		if (result == L2CAP_MC_CONFIRMED) {
5515 			chan->local_amp_id = chan->move_id;
5516 			if (chan->local_amp_id == AMP_ID_BREDR)
5517 				__release_logical_link(chan);
5518 		} else {
5519 			chan->move_id = chan->local_amp_id;
5520 		}
5521 
5522 		l2cap_move_done(chan);
5523 	}
5524 
5525 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5526 
5527 	l2cap_chan_unlock(chan);
5528 	l2cap_chan_put(chan);
5529 
5530 	return 0;
5531 }
5532 
5533 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5534 						 struct l2cap_cmd_hdr *cmd,
5535 						 u16 cmd_len, void *data)
5536 {
5537 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5538 	struct l2cap_chan *chan;
5539 	u16 icid;
5540 
5541 	if (cmd_len != sizeof(*rsp))
5542 		return -EPROTO;
5543 
5544 	icid = le16_to_cpu(rsp->icid);
5545 
5546 	BT_DBG("icid 0x%4.4x", icid);
5547 
5548 	chan = l2cap_get_chan_by_scid(conn, icid);
5549 	if (!chan)
5550 		return 0;
5551 
5552 	__clear_chan_timer(chan);
5553 
5554 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5555 		chan->local_amp_id = chan->move_id;
5556 
5557 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5558 			__release_logical_link(chan);
5559 
5560 		l2cap_move_done(chan);
5561 	}
5562 
5563 	l2cap_chan_unlock(chan);
5564 	l2cap_chan_put(chan);
5565 
5566 	return 0;
5567 }
5568 
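/* Connection Parameter Update Request (LE signaling).  Only valid when
 * the local device is the central; the parameters are validated with
 * hci_check_conn_params() and, if accepted, applied via
 * hci_le_conn_update() and reported to the management interface.
 */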
5569 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5570 					      struct l2cap_cmd_hdr *cmd,
5571 					      u16 cmd_len, u8 *data)
5572 {
5573 	struct hci_conn *hcon = conn->hcon;
5574 	struct l2cap_conn_param_update_req *req;
5575 	struct l2cap_conn_param_update_rsp rsp;
5576 	u16 min, max, latency, to_multiplier;
5577 	int err;
5578 
5579 	if (hcon->role != HCI_ROLE_MASTER)
5580 		return -EINVAL;
5581 
5582 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5583 		return -EPROTO;
5584 
5585 	req = (struct l2cap_conn_param_update_req *) data;
5586 	min		= __le16_to_cpu(req->min);
5587 	max		= __le16_to_cpu(req->max);
5588 	latency		= __le16_to_cpu(req->latency);
5589 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5590 
5591 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5592 	       min, max, latency, to_multiplier);
5593 
5594 	memset(&rsp, 0, sizeof(rsp));
5595 
5596 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5597 	if (err)
5598 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5599 	else
5600 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5601 
5602 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5603 		       sizeof(rsp), &rsp);
5604 
5605 	if (!err) {
5606 		u8 store_hint;
5607 
5608 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5609 						to_multiplier);
5610 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5611 				    store_hint, min, max, latency,
5612 				    to_multiplier);
5613 
5614 	}
5615 
5616 	return 0;
5617 }
5618 
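/* LE Credit Based Connection Response.  On success the returned DCID,
 * MTU, MPS and initial credits are recorded and the channel is marked
 * ready.  Authentication/encryption failures raise the security level
 * and retry through SMP unless MITM protection is already in place.
 */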
5619 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5620 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5621 				u8 *data)
5622 {
5623 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5624 	struct hci_conn *hcon = conn->hcon;
5625 	u16 dcid, mtu, mps, credits, result;
5626 	struct l2cap_chan *chan;
5627 	int err, sec_level;
5628 
5629 	if (cmd_len < sizeof(*rsp))
5630 		return -EPROTO;
5631 
5632 	dcid    = __le16_to_cpu(rsp->dcid);
5633 	mtu     = __le16_to_cpu(rsp->mtu);
5634 	mps     = __le16_to_cpu(rsp->mps);
5635 	credits = __le16_to_cpu(rsp->credits);
5636 	result  = __le16_to_cpu(rsp->result);
5637 
5638 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5639 					   dcid < L2CAP_CID_DYN_START ||
5640 					   dcid > L2CAP_CID_LE_DYN_END))
5641 		return -EPROTO;
5642 
5643 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5644 	       dcid, mtu, mps, credits, result);
5645 
5646 	mutex_lock(&conn->chan_lock);
5647 
5648 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5649 	if (!chan) {
5650 		err = -EBADSLT;
5651 		goto unlock;
5652 	}
5653 
5654 	err = 0;
5655 
5656 	l2cap_chan_lock(chan);
5657 
5658 	switch (result) {
5659 	case L2CAP_CR_LE_SUCCESS:
5660 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5661 			err = -EBADSLT;
5662 			break;
5663 		}
5664 
5665 		chan->ident = 0;
5666 		chan->dcid = dcid;
5667 		chan->omtu = mtu;
5668 		chan->remote_mps = mps;
5669 		chan->tx_credits = credits;
5670 		l2cap_chan_ready(chan);
5671 		break;
5672 
5673 	case L2CAP_CR_LE_AUTHENTICATION:
5674 	case L2CAP_CR_LE_ENCRYPTION:
5675 		/* If we already have MITM protection we can't do
5676 		 * anything.
5677 		 */
5678 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5679 			l2cap_chan_del(chan, ECONNREFUSED);
5680 			break;
5681 		}
5682 
5683 		sec_level = hcon->sec_level + 1;
5684 		if (chan->sec_level < sec_level)
5685 			chan->sec_level = sec_level;
5686 
5687 		/* We'll need to send a new Connect Request */
5688 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5689 
5690 		smp_conn_security(hcon, chan->sec_level);
5691 		break;
5692 
5693 	default:
5694 		l2cap_chan_del(chan, ECONNREFUSED);
5695 		break;
5696 	}
5697 
5698 	l2cap_chan_unlock(chan);
5699 
5700 unlock:
5701 	mutex_unlock(&conn->chan_lock);
5702 
5703 	return err;
5704 }
5705 
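/* Dispatch a single BR/EDR signaling command to its handler.  Unknown
 * opcodes return -EINVAL so that the caller replies with a Command
 * Reject.
 */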
5706 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5707 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5708 				      u8 *data)
5709 {
5710 	int err = 0;
5711 
5712 	switch (cmd->code) {
5713 	case L2CAP_COMMAND_REJ:
5714 		l2cap_command_rej(conn, cmd, cmd_len, data);
5715 		break;
5716 
5717 	case L2CAP_CONN_REQ:
5718 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5719 		break;
5720 
5721 	case L2CAP_CONN_RSP:
5722 	case L2CAP_CREATE_CHAN_RSP:
5723 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5724 		break;
5725 
5726 	case L2CAP_CONF_REQ:
5727 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5728 		break;
5729 
5730 	case L2CAP_CONF_RSP:
5731 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5732 		break;
5733 
5734 	case L2CAP_DISCONN_REQ:
5735 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5736 		break;
5737 
5738 	case L2CAP_DISCONN_RSP:
5739 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5740 		break;
5741 
5742 	case L2CAP_ECHO_REQ:
5743 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5744 		break;
5745 
5746 	case L2CAP_ECHO_RSP:
5747 		break;
5748 
5749 	case L2CAP_INFO_REQ:
5750 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5751 		break;
5752 
5753 	case L2CAP_INFO_RSP:
5754 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5755 		break;
5756 
5757 	case L2CAP_CREATE_CHAN_REQ:
5758 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5759 		break;
5760 
5761 	case L2CAP_MOVE_CHAN_REQ:
5762 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5763 		break;
5764 
5765 	case L2CAP_MOVE_CHAN_RSP:
5766 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5767 		break;
5768 
5769 	case L2CAP_MOVE_CHAN_CFM:
5770 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5771 		break;
5772 
5773 	case L2CAP_MOVE_CHAN_CFM_RSP:
5774 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5775 		break;
5776 
5777 	default:
5778 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5779 		err = -EINVAL;
5780 		break;
5781 	}
5782 
5783 	return err;
5784 }
5785 
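/* LE Credit Based Connection Request.  Validates the SPSM, MTU, MPS and
 * source CID, checks that a listening channel with sufficient security
 * exists, then creates the new channel and answers with an LE
 * Connection Response (or defers when FLAG_DEFER_SETUP is set).
 */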
5786 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5787 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5788 				u8 *data)
5789 {
5790 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5791 	struct l2cap_le_conn_rsp rsp;
5792 	struct l2cap_chan *chan, *pchan;
5793 	u16 dcid, scid, credits, mtu, mps;
5794 	__le16 psm;
5795 	u8 result;
5796 
5797 	if (cmd_len != sizeof(*req))
5798 		return -EPROTO;
5799 
5800 	scid = __le16_to_cpu(req->scid);
5801 	mtu  = __le16_to_cpu(req->mtu);
5802 	mps  = __le16_to_cpu(req->mps);
5803 	psm  = req->psm;
5804 	dcid = 0;
5805 	credits = 0;
5806 
5807 	if (mtu < 23 || mps < 23)
5808 		return -EPROTO;
5809 
5810 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5811 	       scid, mtu, mps);
5812 
5813 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5814 	 * page 1059:
5815 	 *
5816 	 * Valid range: 0x0001-0x00ff
5817 	 *
5818 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5819 	 */
5820 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5821 		result = L2CAP_CR_LE_BAD_PSM;
5822 		chan = NULL;
5823 		goto response;
5824 	}
5825 
5826 	/* Check if we have socket listening on psm */
5827 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5828 					 &conn->hcon->dst, LE_LINK);
5829 	if (!pchan) {
5830 		result = L2CAP_CR_LE_BAD_PSM;
5831 		chan = NULL;
5832 		goto response;
5833 	}
5834 
5835 	mutex_lock(&conn->chan_lock);
5836 	l2cap_chan_lock(pchan);
5837 
5838 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5839 				     SMP_ALLOW_STK)) {
5840 		result = L2CAP_CR_LE_AUTHENTICATION;
5841 		chan = NULL;
5842 		goto response_unlock;
5843 	}
5844 
5845 	/* Check for valid dynamic CID range */
5846 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5847 		result = L2CAP_CR_LE_INVALID_SCID;
5848 		chan = NULL;
5849 		goto response_unlock;
5850 	}
5851 
5852 	/* Check if we already have channel with that dcid */
5853 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5854 		result = L2CAP_CR_LE_SCID_IN_USE;
5855 		chan = NULL;
5856 		goto response_unlock;
5857 	}
5858 
5859 	chan = pchan->ops->new_connection(pchan);
5860 	if (!chan) {
5861 		result = L2CAP_CR_LE_NO_MEM;
5862 		goto response_unlock;
5863 	}
5864 
5865 	bacpy(&chan->src, &conn->hcon->src);
5866 	bacpy(&chan->dst, &conn->hcon->dst);
5867 	chan->src_type = bdaddr_src_type(conn->hcon);
5868 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5869 	chan->psm  = psm;
5870 	chan->dcid = scid;
5871 	chan->omtu = mtu;
5872 	chan->remote_mps = mps;
5873 
5874 	__l2cap_chan_add(conn, chan);
5875 
5876 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5877 
5878 	dcid = chan->scid;
5879 	credits = chan->rx_credits;
5880 
5881 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5882 
5883 	chan->ident = cmd->ident;
5884 
5885 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5886 		l2cap_state_change(chan, BT_CONNECT2);
5887 		/* The following result value is actually not defined
5888 		 * for LE CoC but we use it to let the function know
5889 		 * that it should bail out after doing its cleanup
5890 		 * instead of sending a response.
5891 		 */
5892 		result = L2CAP_CR_PEND;
5893 		chan->ops->defer(chan);
5894 	} else {
5895 		l2cap_chan_ready(chan);
5896 		result = L2CAP_CR_LE_SUCCESS;
5897 	}
5898 
5899 response_unlock:
5900 	l2cap_chan_unlock(pchan);
5901 	mutex_unlock(&conn->chan_lock);
5902 	l2cap_chan_put(pchan);
5903 
5904 	if (result == L2CAP_CR_PEND)
5905 		return 0;
5906 
5907 response:
5908 	if (chan) {
5909 		rsp.mtu = cpu_to_le16(chan->imtu);
5910 		rsp.mps = cpu_to_le16(chan->mps);
5911 	} else {
5912 		rsp.mtu = 0;
5913 		rsp.mps = 0;
5914 	}
5915 
5916 	rsp.dcid    = cpu_to_le16(dcid);
5917 	rsp.credits = cpu_to_le16(credits);
5918 	rsp.result  = cpu_to_le16(result);
5919 
5920 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5921 
5922 	return 0;
5923 }
5924 
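/* LE Flow Control Credit packet.  Adds the indicated credits to the
 * channel's tx_credits, disconnecting if the total would exceed
 * LE_FLOWCTL_MAX_CREDITS, and resumes any transmission that was
 * blocked waiting for credits.
 */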
5925 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5926 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5927 				   u8 *data)
5928 {
5929 	struct l2cap_le_credits *pkt;
5930 	struct l2cap_chan *chan;
5931 	u16 cid, credits, max_credits;
5932 
5933 	if (cmd_len != sizeof(*pkt))
5934 		return -EPROTO;
5935 
5936 	pkt = (struct l2cap_le_credits *) data;
5937 	cid	= __le16_to_cpu(pkt->cid);
5938 	credits	= __le16_to_cpu(pkt->credits);
5939 
5940 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5941 
5942 	chan = l2cap_get_chan_by_dcid(conn, cid);
5943 	if (!chan)
5944 		return -EBADSLT;
5945 
5946 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5947 	if (credits > max_credits) {
5948 		BT_ERR("LE credits overflow");
5949 		l2cap_send_disconn_req(chan, ECONNRESET);
5950 
5951 		/* Return 0 so that we don't trigger an unnecessary
5952 		 * command reject packet.
5953 		 */
5954 		goto unlock;
5955 	}
5956 
5957 	chan->tx_credits += credits;
5958 
5959 	/* Resume sending */
5960 	l2cap_le_flowctl_send(chan);
5961 
5962 	if (chan->tx_credits)
5963 		chan->ops->resume(chan);
5964 
5965 unlock:
5966 	l2cap_chan_unlock(chan);
5967 	l2cap_chan_put(chan);
5968 
5969 	return 0;
5970 }
5971 
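/* Enhanced Credit Based Connection Request.  Up to five channels may be
 * requested at once; a single response carries one DCID per requested
 * SCID (0x0000 for channels that were refused) together with a shared
 * result code.
 */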
5972 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5973 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5974 				       u8 *data)
5975 {
5976 	struct l2cap_ecred_conn_req *req = (void *) data;
5977 	struct {
5978 		struct l2cap_ecred_conn_rsp rsp;
5979 		__le16 dcid[5];
5980 	} __packed pdu = { 0 };	/* zero-init: early error paths send the rsp */
5981 	struct l2cap_chan *chan, *pchan;
5982 	u16 mtu, mps;
5983 	__le16 psm;
5984 	u8 result, len = 0;
5985 	int i, num_scid;
5986 	bool defer = false;
5987 
5988 	if (!enable_ecred)
5989 		return -EINVAL;
5990 
5991 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5992 		result = L2CAP_CR_LE_INVALID_PARAMS;
5993 		goto response;
5994 	}
5995 
5996 	mtu  = __le16_to_cpu(req->mtu);
5997 	mps  = __le16_to_cpu(req->mps);
5998 
5999 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6000 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6001 		goto response;
6002 	}
6003 
6004 	psm  = req->psm;
6005 
6006 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6007 	 * page 1059:
6008 	 *
6009 	 * Valid range: 0x0001-0x00ff
6010 	 *
6011 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6012 	 */
6013 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6014 		result = L2CAP_CR_LE_BAD_PSM;
6015 		goto response;
6016 	}
6017 
6018 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6019 
6020 	memset(&pdu, 0, sizeof(pdu));
6021 
6022 	/* Check if we have socket listening on psm */
6023 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6024 					 &conn->hcon->dst, LE_LINK);
6025 	if (!pchan) {
6026 		result = L2CAP_CR_LE_BAD_PSM;
6027 		goto response;
6028 	}
6029 
6030 	mutex_lock(&conn->chan_lock);
6031 	l2cap_chan_lock(pchan);
6032 
6033 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6034 				     SMP_ALLOW_STK)) {
6035 		result = L2CAP_CR_LE_AUTHENTICATION;
6036 		goto unlock;
6037 	}
6038 
6039 	result = L2CAP_CR_LE_SUCCESS;
6040 	cmd_len -= sizeof(*req);
6041 	num_scid = cmd_len / sizeof(u16);
6042 
6043 	for (i = 0; i < num_scid; i++) {
6044 		u16 scid = __le16_to_cpu(req->scid[i]);
6045 
6046 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6047 
6048 		pdu.dcid[i] = 0x0000;
6049 		len += sizeof(*pdu.dcid);
6050 
6051 		/* Check for valid dynamic CID range */
6052 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6053 			result = L2CAP_CR_LE_INVALID_SCID;
6054 			continue;
6055 		}
6056 
6057 		/* Check if we already have channel with that dcid */
6058 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6059 			result = L2CAP_CR_LE_SCID_IN_USE;
6060 			continue;
6061 		}
6062 
6063 		chan = pchan->ops->new_connection(pchan);
6064 		if (!chan) {
6065 			result = L2CAP_CR_LE_NO_MEM;
6066 			continue;
6067 		}
6068 
6069 		bacpy(&chan->src, &conn->hcon->src);
6070 		bacpy(&chan->dst, &conn->hcon->dst);
6071 		chan->src_type = bdaddr_src_type(conn->hcon);
6072 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6073 		chan->psm  = psm;
6074 		chan->dcid = scid;
6075 		chan->omtu = mtu;
6076 		chan->remote_mps = mps;
6077 
6078 		__l2cap_chan_add(conn, chan);
6079 
6080 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6081 
6082 		/* Init response */
6083 		if (!pdu.rsp.credits) {
6084 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6085 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6086 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6087 		}
6088 
6089 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6090 
6091 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6092 
6093 		chan->ident = cmd->ident;
6094 
6095 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6096 			l2cap_state_change(chan, BT_CONNECT2);
6097 			defer = true;
6098 			chan->ops->defer(chan);
6099 		} else {
6100 			l2cap_chan_ready(chan);
6101 		}
6102 	}
6103 
6104 unlock:
6105 	l2cap_chan_unlock(pchan);
6106 	mutex_unlock(&conn->chan_lock);
6107 	l2cap_chan_put(pchan);
6108 
6109 response:
6110 	pdu.rsp.result = cpu_to_le16(result);
6111 
6112 	if (defer)
6113 		return 0;
6114 
6115 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6116 		       sizeof(pdu.rsp) + len, &pdu);
6117 
6118 	return 0;
6119 }
6120 
6121 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6122 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6123 				       u8 *data)
6124 {
6125 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6126 	struct hci_conn *hcon = conn->hcon;
6127 	u16 mtu, mps, credits, result;
6128 	struct l2cap_chan *chan, *tmp;
6129 	int err = 0, sec_level;
6130 	int i = 0;
6131 
6132 	if (cmd_len < sizeof(*rsp))
6133 		return -EPROTO;
6134 
6135 	mtu     = __le16_to_cpu(rsp->mtu);
6136 	mps     = __le16_to_cpu(rsp->mps);
6137 	credits = __le16_to_cpu(rsp->credits);
6138 	result  = __le16_to_cpu(rsp->result);
6139 
6140 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6141 	       result);
6142 
6143 	mutex_lock(&conn->chan_lock);
6144 
6145 	cmd_len -= sizeof(*rsp);
6146 
6147 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6148 		u16 dcid;
6149 
6150 		if (chan->ident != cmd->ident ||
6151 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6152 		    chan->state == BT_CONNECTED)
6153 			continue;
6154 
6155 		l2cap_chan_lock(chan);
6156 
6157 		/* Check that there is a dcid for each pending channel */
6158 		if (cmd_len < sizeof(dcid)) {
6159 			l2cap_chan_del(chan, ECONNREFUSED);
6160 			l2cap_chan_unlock(chan);
6161 			continue;
6162 		}
6163 
6164 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6165 		cmd_len -= sizeof(u16);
6166 
6167 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6168 
6169 		/* Check if dcid is already in use */
6170 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6171 			/* If a device receives a
6172 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6173 			 * already-assigned Destination CID, then both the
6174 			 * original channel and the new channel shall be
6175 			 * immediately discarded and not used.
6176 			 */
6177 			l2cap_chan_del(chan, ECONNREFUSED);
6178 			l2cap_chan_unlock(chan);
6179 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6180 			l2cap_chan_lock(chan);
6181 			l2cap_chan_del(chan, ECONNRESET);
6182 			l2cap_chan_unlock(chan);
6183 			continue;
6184 		}
6185 
6186 		switch (result) {
6187 		case L2CAP_CR_LE_AUTHENTICATION:
6188 		case L2CAP_CR_LE_ENCRYPTION:
6189 			/* If we already have MITM protection we can't do
6190 			 * anything.
6191 			 */
6192 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6193 				l2cap_chan_del(chan, ECONNREFUSED);
6194 				break;
6195 			}
6196 
6197 			sec_level = hcon->sec_level + 1;
6198 			if (chan->sec_level < sec_level)
6199 				chan->sec_level = sec_level;
6200 
6201 			/* We'll need to send a new Connect Request */
6202 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6203 
6204 			smp_conn_security(hcon, chan->sec_level);
6205 			break;
6206 
6207 		case L2CAP_CR_LE_BAD_PSM:
6208 			l2cap_chan_del(chan, ECONNREFUSED);
6209 			break;
6210 
6211 		default:
6212 			/* If dcid was not set it means the channel was refused */
6213 			if (!dcid) {
6214 				l2cap_chan_del(chan, ECONNREFUSED);
6215 				break;
6216 			}
6217 
6218 			chan->ident = 0;
6219 			chan->dcid = dcid;
6220 			chan->omtu = mtu;
6221 			chan->remote_mps = mps;
6222 			chan->tx_credits = credits;
6223 			l2cap_chan_ready(chan);
6224 			break;
6225 		}
6226 
6227 		l2cap_chan_unlock(chan);
6228 	}
6229 
6230 	mutex_unlock(&conn->chan_lock);
6231 
6232 	return err;
6233 }
6234 
6235 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6236 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6237 					 u8 *data)
6238 {
6239 	struct l2cap_ecred_reconf_req *req = (void *) data;
6240 	struct l2cap_ecred_reconf_rsp rsp;
6241 	u16 mtu, mps, result;
6242 	struct l2cap_chan *chan;
6243 	int i, num_scid;
6244 
6245 	if (!enable_ecred)
6246 		return -EINVAL;
6247 
6248 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6249 		result = L2CAP_CR_LE_INVALID_PARAMS;
6250 		goto respond;
6251 	}
6252 
6253 	mtu = __le16_to_cpu(req->mtu);
6254 	mps = __le16_to_cpu(req->mps);
6255 
6256 	BT_DBG("mtu %u mps %u", mtu, mps);
6257 
6258 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6259 		result = L2CAP_RECONF_INVALID_MTU;
6260 		goto respond;
6261 	}
6262 
6263 	if (mps < L2CAP_ECRED_MIN_MPS) {
6264 		result = L2CAP_RECONF_INVALID_MPS;
6265 		goto respond;
6266 	}
6267 
6268 	cmd_len -= sizeof(*req);
6269 	num_scid = cmd_len / sizeof(u16);
6270 	result = L2CAP_RECONF_SUCCESS;
6271 
6272 	for (i = 0; i < num_scid; i++) {
6273 		u16 scid;
6274 
6275 		scid = __le16_to_cpu(req->scid[i]);
6276 		if (!scid)
6277 			return -EPROTO;
6278 
6279 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6280 		if (!chan)
6281 			continue;
6282 
6283 		/* If the MTU value is decreased for any of the included
6284 		 * channels, then the receiver shall disconnect all
6285 		 * included channels.
6286 		 */
6287 		if (chan->omtu > mtu) {
6288 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6289 			       chan->omtu, mtu);
6290 			result = L2CAP_RECONF_INVALID_MTU;
6291 		}
6292 
6293 		chan->omtu = mtu;
6294 		chan->remote_mps = mps;
6295 	}
6296 
6297 respond:
6298 	rsp.result = cpu_to_le16(result);
6299 
6300 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6301 		       &rsp);
6302 
6303 	return 0;
6304 }
6305 
6306 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6307 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6308 					 u8 *data)
6309 {
6310 	struct l2cap_chan *chan, *tmp;
6311 	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
6312 	u16 result;
6313 
6314 	if (cmd_len < sizeof(*rsp))
6315 		return -EPROTO;
6316 
6317 	result = __le16_to_cpu(rsp->result);
6318 
6319 	BT_DBG("result 0x%4.4x", result);
6320 
6321 	if (!result)
6322 		return 0;
6323 
6324 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6325 		if (chan->ident != cmd->ident)
6326 			continue;
6327 
6328 		l2cap_chan_del(chan, ECONNRESET);
6329 	}
6330 
6331 	return 0;
6332 }
6333 
6334 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6335 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6336 				       u8 *data)
6337 {
6338 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6339 	struct l2cap_chan *chan;
6340 
6341 	if (cmd_len < sizeof(*rej))
6342 		return -EPROTO;
6343 
6344 	mutex_lock(&conn->chan_lock);
6345 
6346 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6347 	if (!chan)
6348 		goto done;
6349 
6350 	l2cap_chan_lock(chan);
6351 	l2cap_chan_del(chan, ECONNREFUSED);
6352 	l2cap_chan_unlock(chan);
6353 
6354 done:
6355 	mutex_unlock(&conn->chan_lock);
6356 	return 0;
6357 }
6358 
6359 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6360 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6361 				   u8 *data)
6362 {
6363 	int err = 0;
6364 
6365 	switch (cmd->code) {
6366 	case L2CAP_COMMAND_REJ:
6367 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6368 		break;
6369 
6370 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6371 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6372 		break;
6373 
6374 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6375 		break;
6376 
6377 	case L2CAP_LE_CONN_RSP:
6378 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6379 		break;
6380 
6381 	case L2CAP_LE_CONN_REQ:
6382 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6383 		break;
6384 
6385 	case L2CAP_LE_CREDITS:
6386 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6387 		break;
6388 
6389 	case L2CAP_ECRED_CONN_REQ:
6390 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6391 		break;
6392 
6393 	case L2CAP_ECRED_CONN_RSP:
6394 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6395 		break;
6396 
6397 	case L2CAP_ECRED_RECONF_REQ:
6398 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6399 		break;
6400 
6401 	case L2CAP_ECRED_RECONF_RSP:
6402 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6403 		break;
6404 
6405 	case L2CAP_DISCONN_REQ:
6406 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6407 		break;
6408 
6409 	case L2CAP_DISCONN_RSP:
6410 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6411 		break;
6412 
6413 	default:
6414 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6415 		err = -EINVAL;
6416 		break;
6417 	}
6418 
6419 	return err;
6420 }
6421 
6422 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6423 					struct sk_buff *skb)
6424 {
6425 	struct hci_conn *hcon = conn->hcon;
6426 	struct l2cap_cmd_hdr *cmd;
6427 	u16 len;
6428 	int err;
6429 
6430 	if (hcon->type != LE_LINK)
6431 		goto drop;
6432 
6433 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6434 		goto drop;
6435 
6436 	cmd = (void *) skb->data;
6437 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6438 
6439 	len = le16_to_cpu(cmd->len);
6440 
6441 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6442 
6443 	if (len != skb->len || !cmd->ident) {
6444 		BT_DBG("corrupted command");
6445 		goto drop;
6446 	}
6447 
6448 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6449 	if (err) {
6450 		struct l2cap_cmd_rej_unk rej;
6451 
6452 		BT_ERR("Wrong link type (%d)", err);
6453 
6454 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6455 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6456 			       sizeof(rej), &rej);
6457 	}
6458 
6459 drop:
6460 	kfree_skb(skb);
6461 }
6462 
6463 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6464 				     struct sk_buff *skb)
6465 {
6466 	struct hci_conn *hcon = conn->hcon;
6467 	struct l2cap_cmd_hdr *cmd;
6468 	int err;
6469 
6470 	l2cap_raw_recv(conn, skb);
6471 
6472 	if (hcon->type != ACL_LINK)
6473 		goto drop;
6474 
6475 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6476 		u16 len;
6477 
6478 		cmd = (void *) skb->data;
6479 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6480 
6481 		len = le16_to_cpu(cmd->len);
6482 
6483 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6484 		       cmd->ident);
6485 
6486 		if (len > skb->len || !cmd->ident) {
6487 			BT_DBG("corrupted command");
6488 			break;
6489 		}
6490 
6491 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6492 		if (err) {
6493 			struct l2cap_cmd_rej_unk rej;
6494 
6495 			BT_ERR("Wrong link type (%d)", err);
6496 
6497 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6498 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6499 				       sizeof(rej), &rej);
6500 		}
6501 
6502 		skb_pull(skb, len);
6503 	}
6504 
6505 drop:
6506 	kfree_skb(skb);
6507 }
6508 
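/* When the channel uses a CRC-16 FCS, trim it from the skb and
 * recompute it over the header and payload; returns -EBADMSG on
 * mismatch so the caller can drop the frame.
 */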
6509 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6510 {
6511 	u16 our_fcs, rcv_fcs;
6512 	int hdr_size;
6513 
6514 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6515 		hdr_size = L2CAP_EXT_HDR_SIZE;
6516 	else
6517 		hdr_size = L2CAP_ENH_HDR_SIZE;
6518 
6519 	if (chan->fcs == L2CAP_FCS_CRC16) {
6520 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6521 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6522 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6523 
6524 		if (our_fcs != rcv_fcs)
6525 			return -EBADMSG;
6526 	}
6527 	return 0;
6528 }
6529 
6530 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6531 {
6532 	struct l2cap_ctrl control;
6533 
6534 	BT_DBG("chan %p", chan);
6535 
6536 	memset(&control, 0, sizeof(control));
6537 	control.sframe = 1;
6538 	control.final = 1;
6539 	control.reqseq = chan->buffer_seq;
6540 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6541 
6542 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6543 		control.super = L2CAP_SUPER_RNR;
6544 		l2cap_send_sframe(chan, &control);
6545 	}
6546 
6547 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6548 	    chan->unacked_frames > 0)
6549 		__set_retrans_timer(chan);
6550 
6551 	/* Send pending iframes */
6552 	l2cap_ertm_send(chan);
6553 
6554 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6555 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6556 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6557 		 * send it now.
6558 		 */
6559 		control.super = L2CAP_SUPER_RR;
6560 		l2cap_send_sframe(chan, &control);
6561 	}
6562 }
6563 
6564 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6565 			    struct sk_buff **last_frag)
6566 {
6567 	/* skb->len reflects data in skb as well as all fragments
6568 	 * skb->data_len reflects only data in fragments
6569 	 */
6570 	if (!skb_has_frag_list(skb))
6571 		skb_shinfo(skb)->frag_list = new_frag;
6572 
6573 	new_frag->next = NULL;
6574 
6575 	(*last_frag)->next = new_frag;
6576 	*last_frag = new_frag;
6577 
6578 	skb->len += new_frag->len;
6579 	skb->data_len += new_frag->len;
6580 	skb->truesize += new_frag->truesize;
6581 }
6582 
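/* Reassemble an SDU from I-frame fragments according to the SAR field:
 * unsegmented frames are delivered directly, a START frame begins a new
 * SDU (after pulling the SDU length), and CONTINUE/END frames are
 * appended until the SDU is complete.  Any error discards the partial
 * SDU.
 */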
6583 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6584 				struct l2cap_ctrl *control)
6585 {
6586 	int err = -EINVAL;
6587 
6588 	switch (control->sar) {
6589 	case L2CAP_SAR_UNSEGMENTED:
6590 		if (chan->sdu)
6591 			break;
6592 
6593 		err = chan->ops->recv(chan, skb);
6594 		break;
6595 
6596 	case L2CAP_SAR_START:
6597 		if (chan->sdu)
6598 			break;
6599 
6600 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6601 			break;
6602 
6603 		chan->sdu_len = get_unaligned_le16(skb->data);
6604 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6605 
6606 		if (chan->sdu_len > chan->imtu) {
6607 			err = -EMSGSIZE;
6608 			break;
6609 		}
6610 
6611 		if (skb->len >= chan->sdu_len)
6612 			break;
6613 
6614 		chan->sdu = skb;
6615 		chan->sdu_last_frag = skb;
6616 
6617 		skb = NULL;
6618 		err = 0;
6619 		break;
6620 
6621 	case L2CAP_SAR_CONTINUE:
6622 		if (!chan->sdu)
6623 			break;
6624 
6625 		append_skb_frag(chan->sdu, skb,
6626 				&chan->sdu_last_frag);
6627 		skb = NULL;
6628 
6629 		if (chan->sdu->len >= chan->sdu_len)
6630 			break;
6631 
6632 		err = 0;
6633 		break;
6634 
6635 	case L2CAP_SAR_END:
6636 		if (!chan->sdu)
6637 			break;
6638 
6639 		append_skb_frag(chan->sdu, skb,
6640 				&chan->sdu_last_frag);
6641 		skb = NULL;
6642 
6643 		if (chan->sdu->len != chan->sdu_len)
6644 			break;
6645 
6646 		err = chan->ops->recv(chan, chan->sdu);
6647 
6648 		if (!err) {
6649 			/* Reassembly complete */
6650 			chan->sdu = NULL;
6651 			chan->sdu_last_frag = NULL;
6652 			chan->sdu_len = 0;
6653 		}
6654 		break;
6655 	}
6656 
6657 	if (err) {
6658 		kfree_skb(skb);
6659 		kfree_skb(chan->sdu);
6660 		chan->sdu = NULL;
6661 		chan->sdu_last_frag = NULL;
6662 		chan->sdu_len = 0;
6663 	}
6664 
6665 	return err;
6666 }
6667 
6668 static int l2cap_resegment(struct l2cap_chan *chan)
6669 {
6670 	/* Placeholder */
6671 	return 0;
6672 }
6673 
6674 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6675 {
6676 	u8 event;
6677 
6678 	if (chan->mode != L2CAP_MODE_ERTM)
6679 		return;
6680 
6681 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6682 	l2cap_tx(chan, NULL, NULL, event);
6683 }
6684 
6685 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6686 {
6687 	int err = 0;
6688 	/* Pass sequential frames to l2cap_reassemble_sdu()
6689 	 * until a gap is encountered.
6690 	 */
6691 
6692 	BT_DBG("chan %p", chan);
6693 
6694 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6695 		struct sk_buff *skb;
6696 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6697 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6698 
6699 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6700 
6701 		if (!skb)
6702 			break;
6703 
6704 		skb_unlink(skb, &chan->srej_q);
6705 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6706 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6707 		if (err)
6708 			break;
6709 	}
6710 
6711 	if (skb_queue_empty(&chan->srej_q)) {
6712 		chan->rx_state = L2CAP_RX_STATE_RECV;
6713 		l2cap_send_ack(chan);
6714 	}
6715 
6716 	return err;
6717 }
6718 
6719 static void l2cap_handle_srej(struct l2cap_chan *chan,
6720 			      struct l2cap_ctrl *control)
6721 {
6722 	struct sk_buff *skb;
6723 
6724 	BT_DBG("chan %p, control %p", chan, control);
6725 
6726 	if (control->reqseq == chan->next_tx_seq) {
6727 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6728 		l2cap_send_disconn_req(chan, ECONNRESET);
6729 		return;
6730 	}
6731 
6732 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6733 
6734 	if (skb == NULL) {
6735 		BT_DBG("Seq %d not available for retransmission",
6736 		       control->reqseq);
6737 		return;
6738 	}
6739 
6740 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6741 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6742 		l2cap_send_disconn_req(chan, ECONNRESET);
6743 		return;
6744 	}
6745 
6746 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6747 
6748 	if (control->poll) {
6749 		l2cap_pass_to_tx(chan, control);
6750 
6751 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6752 		l2cap_retransmit(chan, control);
6753 		l2cap_ertm_send(chan);
6754 
6755 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6756 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6757 			chan->srej_save_reqseq = control->reqseq;
6758 		}
6759 	} else {
6760 		l2cap_pass_to_tx_fbit(chan, control);
6761 
6762 		if (control->final) {
6763 			if (chan->srej_save_reqseq != control->reqseq ||
6764 			    !test_and_clear_bit(CONN_SREJ_ACT,
6765 						&chan->conn_state))
6766 				l2cap_retransmit(chan, control);
6767 		} else {
6768 			l2cap_retransmit(chan, control);
6769 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6770 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6771 				chan->srej_save_reqseq = control->reqseq;
6772 			}
6773 		}
6774 	}
6775 }
6776 
6777 static void l2cap_handle_rej(struct l2cap_chan *chan,
6778 			     struct l2cap_ctrl *control)
6779 {
6780 	struct sk_buff *skb;
6781 
6782 	BT_DBG("chan %p, control %p", chan, control);
6783 
6784 	if (control->reqseq == chan->next_tx_seq) {
6785 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6786 		l2cap_send_disconn_req(chan, ECONNRESET);
6787 		return;
6788 	}
6789 
6790 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6791 
6792 	if (chan->max_tx && skb &&
6793 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6794 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6795 		l2cap_send_disconn_req(chan, ECONNRESET);
6796 		return;
6797 	}
6798 
6799 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6800 
6801 	l2cap_pass_to_tx(chan, control);
6802 
6803 	if (control->final) {
6804 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6805 			l2cap_retransmit_all(chan, control);
6806 	} else {
6807 		l2cap_retransmit_all(chan, control);
6808 		l2cap_ertm_send(chan);
6809 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6810 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6811 	}
6812 }
6813 
6814 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6815 {
6816 	BT_DBG("chan %p, txseq %d", chan, txseq);
6817 
6818 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6819 	       chan->expected_tx_seq);
6820 
6821 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6822 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6823 		    chan->tx_win) {
6824 			/* See notes below regarding "double poll" and
6825 			 * invalid packets.
6826 			 */
6827 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6828 				BT_DBG("Invalid/Ignore - after SREJ");
6829 				return L2CAP_TXSEQ_INVALID_IGNORE;
6830 			} else {
6831 				BT_DBG("Invalid - in window after SREJ sent");
6832 				return L2CAP_TXSEQ_INVALID;
6833 			}
6834 		}
6835 
6836 		if (chan->srej_list.head == txseq) {
6837 			BT_DBG("Expected SREJ");
6838 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6839 		}
6840 
6841 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6842 			BT_DBG("Duplicate SREJ - txseq already stored");
6843 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6844 		}
6845 
6846 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6847 			BT_DBG("Unexpected SREJ - not requested");
6848 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6849 		}
6850 	}
6851 
6852 	if (chan->expected_tx_seq == txseq) {
6853 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6854 		    chan->tx_win) {
6855 			BT_DBG("Invalid - txseq outside tx window");
6856 			return L2CAP_TXSEQ_INVALID;
6857 		} else {
6858 			BT_DBG("Expected");
6859 			return L2CAP_TXSEQ_EXPECTED;
6860 		}
6861 	}
6862 
6863 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6864 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6865 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6866 		return L2CAP_TXSEQ_DUPLICATE;
6867 	}
6868 
6869 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6870 		/* A source of invalid packets is a "double poll" condition,
6871 		 * where delays cause us to send multiple poll packets.  If
6872 		 * the remote stack receives and processes both polls,
6873 		 * sequence numbers can wrap around in such a way that a
6874 		 * resent frame has a sequence number that looks like new data
6875 		 * with a sequence gap.  This would trigger an erroneous SREJ
6876 		 * request.
6877 		 *
6878 		 * Fortunately, this is impossible with a tx window that's
6879 		 * less than half of the maximum sequence number, which allows
6880 		 * invalid frames to be safely ignored.
6881 		 *
6882 		 * With tx window sizes greater than half of the tx window
6883 		 * maximum, the frame is invalid and cannot be ignored.  This
6884 		 * causes a disconnect.
6885 		 */
6886 
6887 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6888 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6889 			return L2CAP_TXSEQ_INVALID_IGNORE;
6890 		} else {
6891 			BT_DBG("Invalid - txseq outside tx window");
6892 			return L2CAP_TXSEQ_INVALID;
6893 		}
6894 	} else {
6895 		BT_DBG("Unexpected - txseq indicates missing frames");
6896 		return L2CAP_TXSEQ_UNEXPECTED;
6897 	}
6898 }
6899 
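/* ERTM receive state machine, RECV state.  Expected I-frames are
 * reassembled and acknowledged, out-of-sequence frames are queued and
 * trigger an SREJ, and RR/RNR/REJ/SREJ S-frames update the transmit
 * side accordingly.
 */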
6900 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6901 			       struct l2cap_ctrl *control,
6902 			       struct sk_buff *skb, u8 event)
6903 {
6904 	struct l2cap_ctrl local_control;
6905 	int err = 0;
6906 	bool skb_in_use = false;
6907 
6908 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6909 	       event);
6910 
6911 	switch (event) {
6912 	case L2CAP_EV_RECV_IFRAME:
6913 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6914 		case L2CAP_TXSEQ_EXPECTED:
6915 			l2cap_pass_to_tx(chan, control);
6916 
6917 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6918 				BT_DBG("Busy, discarding expected seq %d",
6919 				       control->txseq);
6920 				break;
6921 			}
6922 
6923 			chan->expected_tx_seq = __next_seq(chan,
6924 							   control->txseq);
6925 
6926 			chan->buffer_seq = chan->expected_tx_seq;
6927 			skb_in_use = true;
6928 
6929 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6930 			 * control, so make a copy in advance to use it after
6931 			 * l2cap_reassemble_sdu returns and to avoid the race
6932 			 * condition, for example:
6933 			 *
6934 			 * The current thread calls:
6935 			 *   l2cap_reassemble_sdu
6936 			 *     chan->ops->recv == l2cap_sock_recv_cb
6937 			 *       __sock_queue_rcv_skb
6938 			 * Another thread calls:
6939 			 *   bt_sock_recvmsg
6940 			 *     skb_recv_datagram
6941 			 *     skb_free_datagram
6942 			 * Then the current thread tries to access control, but
6943 			 * it was freed by skb_free_datagram.
6944 			 */
6945 			local_control = *control;
6946 			err = l2cap_reassemble_sdu(chan, skb, control);
6947 			if (err)
6948 				break;
6949 
6950 			if (local_control.final) {
6951 				if (!test_and_clear_bit(CONN_REJ_ACT,
6952 							&chan->conn_state)) {
6953 					local_control.final = 0;
6954 					l2cap_retransmit_all(chan, &local_control);
6955 					l2cap_ertm_send(chan);
6956 				}
6957 			}
6958 
6959 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6960 				l2cap_send_ack(chan);
6961 			break;
6962 		case L2CAP_TXSEQ_UNEXPECTED:
6963 			l2cap_pass_to_tx(chan, control);
6964 
6965 			/* Can't issue SREJ frames in the local busy state.
6966 			 * Drop this frame, it will be seen as missing
6967 			 * when local busy is exited.
6968 			 */
6969 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6970 				BT_DBG("Busy, discarding unexpected seq %d",
6971 				       control->txseq);
6972 				break;
6973 			}
6974 
6975 			/* There was a gap in the sequence, so an SREJ
6976 			 * must be sent for each missing frame.  The
6977 			 * current frame is stored for later use.
6978 			 */
6979 			skb_queue_tail(&chan->srej_q, skb);
6980 			skb_in_use = true;
6981 			BT_DBG("Queued %p (queue len %d)", skb,
6982 			       skb_queue_len(&chan->srej_q));
6983 
6984 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6985 			l2cap_seq_list_clear(&chan->srej_list);
6986 			l2cap_send_srej(chan, control->txseq);
6987 
6988 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6989 			break;
6990 		case L2CAP_TXSEQ_DUPLICATE:
6991 			l2cap_pass_to_tx(chan, control);
6992 			break;
6993 		case L2CAP_TXSEQ_INVALID_IGNORE:
6994 			break;
6995 		case L2CAP_TXSEQ_INVALID:
6996 		default:
6997 			l2cap_send_disconn_req(chan, ECONNRESET);
6998 			break;
6999 		}
7000 		break;
7001 	case L2CAP_EV_RECV_RR:
7002 		l2cap_pass_to_tx(chan, control);
7003 		if (control->final) {
7004 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7005 
7006 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7007 			    !__chan_is_moving(chan)) {
7008 				control->final = 0;
7009 				l2cap_retransmit_all(chan, control);
7010 			}
7011 
7012 			l2cap_ertm_send(chan);
7013 		} else if (control->poll) {
7014 			l2cap_send_i_or_rr_or_rnr(chan);
7015 		} else {
7016 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7017 					       &chan->conn_state) &&
7018 			    chan->unacked_frames)
7019 				__set_retrans_timer(chan);
7020 
7021 			l2cap_ertm_send(chan);
7022 		}
7023 		break;
7024 	case L2CAP_EV_RECV_RNR:
7025 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7026 		l2cap_pass_to_tx(chan, control);
7027 		if (control && control->poll) {
7028 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7029 			l2cap_send_rr_or_rnr(chan, 0);
7030 		}
7031 		__clear_retrans_timer(chan);
7032 		l2cap_seq_list_clear(&chan->retrans_list);
7033 		break;
7034 	case L2CAP_EV_RECV_REJ:
7035 		l2cap_handle_rej(chan, control);
7036 		break;
7037 	case L2CAP_EV_RECV_SREJ:
7038 		l2cap_handle_srej(chan, control);
7039 		break;
7040 	default:
7041 		break;
7042 	}
7043 
7044 	if (skb && !skb_in_use) {
7045 		BT_DBG("Freeing %p", skb);
7046 		kfree_skb(skb);
7047 	}
7048 
7049 	return err;
7050 }
7051 
7052 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7053 				    struct l2cap_ctrl *control,
7054 				    struct sk_buff *skb, u8 event)
7055 {
7056 	int err = 0;
7057 	u16 txseq = control->txseq;
7058 	bool skb_in_use = false;
7059 
7060 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7061 	       event);
7062 
7063 	switch (event) {
7064 	case L2CAP_EV_RECV_IFRAME:
7065 		switch (l2cap_classify_txseq(chan, txseq)) {
7066 		case L2CAP_TXSEQ_EXPECTED:
7067 			/* Keep frame for reassembly later */
7068 			l2cap_pass_to_tx(chan, control);
7069 			skb_queue_tail(&chan->srej_q, skb);
7070 			skb_in_use = true;
7071 			BT_DBG("Queued %p (queue len %d)", skb,
7072 			       skb_queue_len(&chan->srej_q));
7073 
7074 			chan->expected_tx_seq = __next_seq(chan, txseq);
7075 			break;
7076 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7077 			l2cap_seq_list_pop(&chan->srej_list);
7078 
7079 			l2cap_pass_to_tx(chan, control);
7080 			skb_queue_tail(&chan->srej_q, skb);
7081 			skb_in_use = true;
7082 			BT_DBG("Queued %p (queue len %d)", skb,
7083 			       skb_queue_len(&chan->srej_q));
7084 
7085 			err = l2cap_rx_queued_iframes(chan);
7086 			if (err)
7087 				break;
7088 
7089 			break;
7090 		case L2CAP_TXSEQ_UNEXPECTED:
7091 			/* Got a frame that can't be reassembled yet.
7092 			 * Save it for later, and send SREJs to cover
7093 			 * the missing frames.
7094 			 */
7095 			skb_queue_tail(&chan->srej_q, skb);
7096 			skb_in_use = true;
7097 			BT_DBG("Queued %p (queue len %d)", skb,
7098 			       skb_queue_len(&chan->srej_q));
7099 
7100 			l2cap_pass_to_tx(chan, control);
7101 			l2cap_send_srej(chan, control->txseq);
7102 			break;
7103 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7104 			/* This frame was requested with an SREJ, but
7105 			 * some expected retransmitted frames are
7106 			 * missing.  Request retransmission of missing
7107 			 * SREJ'd frames.
7108 			 */
7109 			skb_queue_tail(&chan->srej_q, skb);
7110 			skb_in_use = true;
7111 			BT_DBG("Queued %p (queue len %d)", skb,
7112 			       skb_queue_len(&chan->srej_q));
7113 
7114 			l2cap_pass_to_tx(chan, control);
7115 			l2cap_send_srej_list(chan, control->txseq);
7116 			break;
7117 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7118 			/* We've already queued this frame.  Drop this copy. */
7119 			l2cap_pass_to_tx(chan, control);
7120 			break;
7121 		case L2CAP_TXSEQ_DUPLICATE:
7122 			/* Expecting a later sequence number, so this frame
7123 			 * was already received.  Ignore it completely.
7124 			 */
7125 			break;
7126 		case L2CAP_TXSEQ_INVALID_IGNORE:
7127 			break;
7128 		case L2CAP_TXSEQ_INVALID:
7129 		default:
7130 			l2cap_send_disconn_req(chan, ECONNRESET);
7131 			break;
7132 		}
7133 		break;
7134 	case L2CAP_EV_RECV_RR:
7135 		l2cap_pass_to_tx(chan, control);
7136 		if (control->final) {
7137 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7138 
7139 			if (!test_and_clear_bit(CONN_REJ_ACT,
7140 						&chan->conn_state)) {
7141 				control->final = 0;
7142 				l2cap_retransmit_all(chan, control);
7143 			}
7144 
7145 			l2cap_ertm_send(chan);
7146 		} else if (control->poll) {
7147 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7148 					       &chan->conn_state) &&
7149 			    chan->unacked_frames) {
7150 				__set_retrans_timer(chan);
7151 			}
7152 
7153 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7154 			l2cap_send_srej_tail(chan);
7155 		} else {
7156 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7157 					       &chan->conn_state) &&
7158 			    chan->unacked_frames)
7159 				__set_retrans_timer(chan);
7160 
7161 			l2cap_send_ack(chan);
7162 		}
7163 		break;
7164 	case L2CAP_EV_RECV_RNR:
7165 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7166 		l2cap_pass_to_tx(chan, control);
7167 		if (control->poll) {
7168 			l2cap_send_srej_tail(chan);
7169 		} else {
7170 			struct l2cap_ctrl rr_control;
7171 			memset(&rr_control, 0, sizeof(rr_control));
7172 			rr_control.sframe = 1;
7173 			rr_control.super = L2CAP_SUPER_RR;
7174 			rr_control.reqseq = chan->buffer_seq;
7175 			l2cap_send_sframe(chan, &rr_control);
7176 		}
7177 
7178 		break;
7179 	case L2CAP_EV_RECV_REJ:
7180 		l2cap_handle_rej(chan, control);
7181 		break;
7182 	case L2CAP_EV_RECV_SREJ:
7183 		l2cap_handle_srej(chan, control);
7184 		break;
7185 	}
7186 
7187 	if (skb && !skb_in_use) {
7188 		BT_DBG("Freeing %p", skb);
7189 		kfree_skb(skb);
7190 	}
7191 
7192 	return err;
7193 }
7194 
7195 static int l2cap_finish_move(struct l2cap_chan *chan)
7196 {
7197 	BT_DBG("chan %p", chan);
7198 
7199 	chan->rx_state = L2CAP_RX_STATE_RECV;
7200 
7201 	if (chan->hs_hcon)
7202 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7203 	else
7204 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7205 
7206 	return l2cap_resegment(chan);
7207 }
7208 
7209 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7210 				 struct l2cap_ctrl *control,
7211 				 struct sk_buff *skb, u8 event)
7212 {
7213 	int err;
7214 
7215 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7216 	       event);
7217 
7218 	if (!control->poll)
7219 		return -EPROTO;
7220 
7221 	l2cap_process_reqseq(chan, control->reqseq);
7222 
7223 	if (!skb_queue_empty(&chan->tx_q))
7224 		chan->tx_send_head = skb_peek(&chan->tx_q);
7225 	else
7226 		chan->tx_send_head = NULL;
7227 
7228 	/* Rewind next_tx_seq to the point expected
7229 	 * by the receiver.
7230 	 */
7231 	chan->next_tx_seq = control->reqseq;
7232 	chan->unacked_frames = 0;
7233 
7234 	err = l2cap_finish_move(chan);
7235 	if (err)
7236 		return err;
7237 
7238 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7239 	l2cap_send_i_or_rr_or_rnr(chan);
7240 
7241 	if (event == L2CAP_EV_RECV_IFRAME)
7242 		return -EPROTO;
7243 
7244 	return l2cap_rx_state_recv(chan, control, NULL, event);
7245 }
7246 
7247 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7248 				 struct l2cap_ctrl *control,
7249 				 struct sk_buff *skb, u8 event)
7250 {
7251 	int err;
7252 
7253 	if (!control->final)
7254 		return -EPROTO;
7255 
7256 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7257 
7258 	chan->rx_state = L2CAP_RX_STATE_RECV;
7259 	l2cap_process_reqseq(chan, control->reqseq);
7260 
7261 	if (!skb_queue_empty(&chan->tx_q))
7262 		chan->tx_send_head = skb_peek(&chan->tx_q);
7263 	else
7264 		chan->tx_send_head = NULL;
7265 
7266 	/* Rewind next_tx_seq to the point expected
7267 	 * by the receiver.
7268 	 */
7269 	chan->next_tx_seq = control->reqseq;
7270 	chan->unacked_frames = 0;
7271 
7272 	if (chan->hs_hcon)
7273 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7274 	else
7275 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7276 
7277 	err = l2cap_resegment(chan);
7278 
7279 	if (!err)
7280 		err = l2cap_rx_state_recv(chan, control, skb, event);
7281 
7282 	return err;
7283 }
7284 
7285 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7286 {
7287 	/* Make sure reqseq is for a packet that has been sent but not acked */
7288 	u16 unacked;
7289 
7290 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7291 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7292 }
7293 
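/* Top-level ERTM receive handler: validates the ReqSeq and dispatches
 * the event to the handler for the current rx_state.  An invalid
 * ReqSeq forces a disconnect.
 */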
7294 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7295 		    struct sk_buff *skb, u8 event)
7296 {
7297 	int err = 0;
7298 
7299 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7300 	       control, skb, event, chan->rx_state);
7301 
7302 	if (__valid_reqseq(chan, control->reqseq)) {
7303 		switch (chan->rx_state) {
7304 		case L2CAP_RX_STATE_RECV:
7305 			err = l2cap_rx_state_recv(chan, control, skb, event);
7306 			break;
7307 		case L2CAP_RX_STATE_SREJ_SENT:
7308 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7309 						       event);
7310 			break;
7311 		case L2CAP_RX_STATE_WAIT_P:
7312 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7313 			break;
7314 		case L2CAP_RX_STATE_WAIT_F:
7315 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7316 			break;
7317 		default:
7318 			/* shut it down */
7319 			break;
7320 		}
7321 	} else {
7322 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7323 		       control->reqseq, chan->next_tx_seq,
7324 		       chan->expected_ack_seq);
7325 		l2cap_send_disconn_req(chan, ECONNRESET);
7326 	}
7327 
7328 	return err;
7329 }
7330 
7331 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7332 			   struct sk_buff *skb)
7333 {
7334 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7335 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7336 	 * returns and to avoid the race condition, for example:
7337 	 *
7338 	 * The current thread calls:
7339 	 *   l2cap_reassemble_sdu
7340 	 *     chan->ops->recv == l2cap_sock_recv_cb
7341 	 *       __sock_queue_rcv_skb
7342 	 * Another thread calls:
7343 	 *   bt_sock_recvmsg
7344 	 *     skb_recv_datagram
7345 	 *     skb_free_datagram
7346 	 * Then the current thread tries to access control, but it was freed by
7347 	 * skb_free_datagram.
7348 	 */
7349 	u16 txseq = control->txseq;
7350 
7351 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7352 	       chan->rx_state);
7353 
7354 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7355 		l2cap_pass_to_tx(chan, control);
7356 
7357 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7358 		       __next_seq(chan, chan->buffer_seq));
7359 
7360 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7361 
7362 		l2cap_reassemble_sdu(chan, skb, control);
7363 	} else {
7364 		if (chan->sdu) {
7365 			kfree_skb(chan->sdu);
7366 			chan->sdu = NULL;
7367 		}
7368 		chan->sdu_last_frag = NULL;
7369 		chan->sdu_len = 0;
7370 
7371 		if (skb) {
7372 			BT_DBG("Freeing %p", skb);
7373 			kfree_skb(skb);
7374 		}
7375 	}
7376 
7377 	chan->last_acked_seq = txseq;
7378 	chan->expected_tx_seq = __next_seq(chan, txseq);
7379 
7380 	return 0;
7381 }
7382 
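/* Entry point for data on an ERTM or streaming channel.  Checks the
 * FCS and the payload length against the MPS, validates the F/P bits,
 * then feeds I-frames and S-frames into the receive state machine.
 */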
7383 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7384 {
7385 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7386 	u16 len;
7387 	u8 event;
7388 
7389 	__unpack_control(chan, skb);
7390 
7391 	len = skb->len;
7392 
7393 	/*
7394 	 * We can just drop the corrupted I-frame here.
7395 	 * The receive state machine will notice it is missing, start the
7396 	 * proper recovery procedure and ask for retransmission.
7397 	 */
7398 	if (l2cap_check_fcs(chan, skb))
7399 		goto drop;
7400 
7401 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7402 		len -= L2CAP_SDULEN_SIZE;
7403 
7404 	if (chan->fcs == L2CAP_FCS_CRC16)
7405 		len -= L2CAP_FCS_SIZE;
7406 
7407 	if (len > chan->mps) {
7408 		l2cap_send_disconn_req(chan, ECONNRESET);
7409 		goto drop;
7410 	}
7411 
7412 	if (chan->ops->filter) {
7413 		if (chan->ops->filter(chan, skb))
7414 			goto drop;
7415 	}
7416 
7417 	if (!control->sframe) {
7418 		int err;
7419 
7420 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7421 		       control->sar, control->reqseq, control->final,
7422 		       control->txseq);
7423 
7424 		/* Validate F-bit - F=0 always valid, F=1 only
7425 		 * valid in TX WAIT_F
7426 		 */
7427 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7428 			goto drop;
7429 
7430 		if (chan->mode != L2CAP_MODE_STREAMING) {
7431 			event = L2CAP_EV_RECV_IFRAME;
7432 			err = l2cap_rx(chan, control, skb, event);
7433 		} else {
7434 			err = l2cap_stream_rx(chan, control, skb);
7435 		}
7436 
7437 		if (err)
7438 			l2cap_send_disconn_req(chan, ECONNRESET);
7439 	} else {
7440 		const u8 rx_func_to_event[4] = {
7441 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7442 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7443 		};
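		/* The 2-bit supervisory function of an S-frame indexes this
		 * table directly: 0 = RR, 1 = REJ, 2 = RNR, 3 = SREJ.
		 */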
7444 
7445 		/* Only I-frames are expected in streaming mode */
7446 		if (chan->mode == L2CAP_MODE_STREAMING)
7447 			goto drop;
7448 
7449 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7450 		       control->reqseq, control->final, control->poll,
7451 		       control->super);
7452 
7453 		if (len != 0) {
7454 			BT_ERR("Trailing bytes: %d in sframe", len);
7455 			l2cap_send_disconn_req(chan, ECONNRESET);
7456 			goto drop;
7457 		}
7458 
7459 		/* Validate F and P bits */
7460 		if (control->final && (control->poll ||
7461 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7462 			goto drop;
7463 
7464 		event = rx_func_to_event[control->super];
7465 		if (l2cap_rx(chan, control, skb, event))
7466 			l2cap_send_disconn_req(chan, ECONNRESET);
7467 	}
7468 
7469 	return 0;
7470 
7471 drop:
7472 	kfree_skb(skb);
7473 	return 0;
7474 }
7475 
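/* A worked example of the credit top-up below, with assumed values: for
 * imtu = 512 and mps = 64 the target is 512/64 + 1 = 9 credits. If
 * rx_credits has dropped to 3, 6 credits are returned to the sender;
 * if rx_credits is already 9 or more, no LE credits packet is sent.
 */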
7476 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7477 {
7478 	struct l2cap_conn *conn = chan->conn;
7479 	struct l2cap_le_credits pkt;
7480 	u16 return_credits;
7481 
7482 	return_credits = (chan->imtu / chan->mps) + 1;
7483 
7484 	if (chan->rx_credits >= return_credits)
7485 		return;
7486 
7487 	return_credits -= chan->rx_credits;
7488 
7489 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7490 
7491 	chan->rx_credits += return_credits;
7492 
7493 	pkt.cid     = cpu_to_le16(chan->scid);
7494 	pkt.credits = cpu_to_le16(return_credits);
7495 
7496 	chan->ident = l2cap_get_ident(conn);
7497 
7498 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7499 }
7500 
7501 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7502 {
7503 	int err;
7504 
7505 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7506 
7507 	/* Wait for recv to confirm reception before updating the credits */
7508 	err = chan->ops->recv(chan, skb);
7509 
7510 	/* Update credits whenever an SDU is received */
7511 	l2cap_chan_le_send_credits(chan);
7512 
7513 	return err;
7514 }
7515 
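/* Credit-based reassembly sketch: the first PDU of an SDU carries a 2-byte
 * SDU length field; later fragments are appended to chan->sdu until the
 * accumulated length matches sdu_len, at which point the complete SDU is
 * passed up via l2cap_ecred_recv().
 */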
7516 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7517 {
7518 	int err;
7519 
7520 	if (!chan->rx_credits) {
7521 		BT_ERR("No credits to receive LE L2CAP data");
7522 		l2cap_send_disconn_req(chan, ECONNRESET);
7523 		return -ENOBUFS;
7524 	}
7525 
7526 	if (chan->imtu < skb->len) {
7527 		BT_ERR("Too big LE L2CAP PDU");
7528 		return -ENOBUFS;
7529 	}
7530 
7531 	chan->rx_credits--;
7532 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7533 
7534 	/* Update if the remote had run out of credits; this should only happen
7535 	 * if the remote is not using the entire MPS.
7536 	 */
7537 	if (!chan->rx_credits)
7538 		l2cap_chan_le_send_credits(chan);
7539 
7540 	err = 0;
7541 
7542 	if (!chan->sdu) {
7543 		u16 sdu_len;
7544 
7545 		sdu_len = get_unaligned_le16(skb->data);
7546 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7547 
7548 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7549 		       sdu_len, skb->len, chan->imtu);
7550 
7551 		if (sdu_len > chan->imtu) {
7552 			BT_ERR("Too big LE L2CAP SDU length received");
7553 			err = -EMSGSIZE;
7554 			goto failed;
7555 		}
7556 
7557 		if (skb->len > sdu_len) {
7558 			BT_ERR("Too much LE L2CAP data received");
7559 			err = -EINVAL;
7560 			goto failed;
7561 		}
7562 
7563 		if (skb->len == sdu_len)
7564 			return l2cap_ecred_recv(chan, skb);
7565 
7566 		chan->sdu = skb;
7567 		chan->sdu_len = sdu_len;
7568 		chan->sdu_last_frag = skb;
7569 
7570 		/* Detect if remote is not able to use the selected MPS */
7571 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7572 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7573 
7574 			/* Adjust the number of credits */
7575 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7576 			chan->mps = mps_len;
7577 			l2cap_chan_le_send_credits(chan);
7578 		}
7579 
7580 		return 0;
7581 	}
7582 
7583 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7584 	       chan->sdu->len, skb->len, chan->sdu_len);
7585 
7586 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7587 		BT_ERR("Too much LE L2CAP data received");
7588 		err = -EINVAL;
7589 		goto failed;
7590 	}
7591 
7592 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7593 	skb = NULL;
7594 
7595 	if (chan->sdu->len == chan->sdu_len) {
7596 		err = l2cap_ecred_recv(chan, chan->sdu);
7597 		if (!err) {
7598 			chan->sdu = NULL;
7599 			chan->sdu_last_frag = NULL;
7600 			chan->sdu_len = 0;
7601 		}
7602 	}
7603 
7604 failed:
7605 	if (err) {
7606 		kfree_skb(skb);
7607 		kfree_skb(chan->sdu);
7608 		chan->sdu = NULL;
7609 		chan->sdu_last_frag = NULL;
7610 		chan->sdu_len = 0;
7611 	}
7612 
7613 	/* We can't return an error here since we took care of the skb
7614 	 * freeing internally. An error return would cause the caller to
7615 	 * do a double-free of the skb.
7616 	 */
7617 	return 0;
7618 }
7619 
7620 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7621 			       struct sk_buff *skb)
7622 {
7623 	struct l2cap_chan *chan;
7624 
7625 	chan = l2cap_get_chan_by_scid(conn, cid);
7626 	if (!chan) {
7627 		if (cid == L2CAP_CID_A2MP) {
7628 			chan = a2mp_channel_create(conn, skb);
7629 			if (!chan) {
7630 				kfree_skb(skb);
7631 				return;
7632 			}
7633 
7634 			l2cap_chan_hold(chan);
7635 			l2cap_chan_lock(chan);
7636 		} else {
7637 			BT_DBG("unknown cid 0x%4.4x", cid);
7638 			/* Drop packet and return */
7639 			kfree_skb(skb);
7640 			return;
7641 		}
7642 	}
7643 
7644 	BT_DBG("chan %p, len %d", chan, skb->len);
7645 
7646 	/* If we receive data on a fixed channel before the info req/rsp
7647 	 * procedure is done, simply assume that the channel is supported
7648 	 * and mark it as ready.
7649 	 */
7650 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7651 		l2cap_chan_ready(chan);
7652 
7653 	if (chan->state != BT_CONNECTED)
7654 		goto drop;
7655 
7656 	switch (chan->mode) {
7657 	case L2CAP_MODE_LE_FLOWCTL:
7658 	case L2CAP_MODE_EXT_FLOWCTL:
7659 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7660 			goto drop;
7661 
7662 		goto done;
7663 
7664 	case L2CAP_MODE_BASIC:
7665 		/* If the socket recv buffer overflows we drop data here,
7666 		 * which is *bad* because L2CAP has to be reliable.
7667 		 * But we don't have any other choice: L2CAP doesn't
7668 		 * provide a flow control mechanism. */
7669 
7670 		if (chan->imtu < skb->len) {
7671 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7672 			goto drop;
7673 		}
7674 
7675 		if (!chan->ops->recv(chan, skb))
7676 			goto done;
7677 		break;
7678 
7679 	case L2CAP_MODE_ERTM:
7680 	case L2CAP_MODE_STREAMING:
7681 		l2cap_data_rcv(chan, skb);
7682 		goto done;
7683 
7684 	default:
7685 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7686 		break;
7687 	}
7688 
7689 drop:
7690 	kfree_skb(skb);
7691 
7692 done:
7693 	l2cap_chan_unlock(chan);
7694 	l2cap_chan_put(chan);
7695 }
7696 
7697 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7698 				  struct sk_buff *skb)
7699 {
7700 	struct hci_conn *hcon = conn->hcon;
7701 	struct l2cap_chan *chan;
7702 
7703 	if (hcon->type != ACL_LINK)
7704 		goto free_skb;
7705 
7706 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7707 					ACL_LINK);
7708 	if (!chan)
7709 		goto free_skb;
7710 
7711 	BT_DBG("chan %p, len %d", chan, skb->len);
7712 
7713 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7714 		goto drop;
7715 
7716 	if (chan->imtu < skb->len)
7717 		goto drop;
7718 
7719 	/* Store remote BD_ADDR and PSM for msg_name */
7720 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7721 	bt_cb(skb)->l2cap.psm = psm;
7722 
7723 	if (!chan->ops->recv(chan, skb)) {
7724 		l2cap_chan_put(chan);
7725 		return;
7726 	}
7727 
7728 drop:
7729 	l2cap_chan_put(chan);
7730 free_skb:
7731 	kfree_skb(skb);
7732 }
7733 
7734 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7735 {
7736 	struct l2cap_hdr *lh = (void *) skb->data;
7737 	struct hci_conn *hcon = conn->hcon;
7738 	u16 cid, len;
7739 	__le16 psm;
7740 
7741 	if (hcon->state != BT_CONNECTED) {
7742 		BT_DBG("queueing pending rx skb");
7743 		skb_queue_tail(&conn->pending_rx, skb);
7744 		return;
7745 	}
7746 
7747 	skb_pull(skb, L2CAP_HDR_SIZE);
7748 	cid = __le16_to_cpu(lh->cid);
7749 	len = __le16_to_cpu(lh->len);
7750 
7751 	if (len != skb->len) {
7752 		kfree_skb(skb);
7753 		return;
7754 	}
7755 
7756 	/* Since we can't actively block incoming LE connections we must
7757 	 * at least ensure that we ignore incoming data from them.
7758 	 */
7759 	if (hcon->type == LE_LINK &&
7760 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7761 				   bdaddr_dst_type(hcon))) {
7762 		kfree_skb(skb);
7763 		return;
7764 	}
7765 
7766 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7767 
7768 	switch (cid) {
7769 	case L2CAP_CID_SIGNALING:
7770 		l2cap_sig_channel(conn, skb);
7771 		break;
7772 
7773 	case L2CAP_CID_CONN_LESS:
7774 		psm = get_unaligned((__le16 *) skb->data);
7775 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7776 		l2cap_conless_channel(conn, psm, skb);
7777 		break;
7778 
7779 	case L2CAP_CID_LE_SIGNALING:
7780 		l2cap_le_sig_channel(conn, skb);
7781 		break;
7782 
7783 	default:
7784 		l2cap_data_channel(conn, cid, skb);
7785 		break;
7786 	}
7787 }
7788 
7789 static void process_pending_rx(struct work_struct *work)
7790 {
7791 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7792 					       pending_rx_work);
7793 	struct sk_buff *skb;
7794 
7795 	BT_DBG("");
7796 
7797 	while ((skb = skb_dequeue(&conn->pending_rx)))
7798 		l2cap_recv_frame(conn, skb);
7799 }
7800 
7801 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7802 {
7803 	struct l2cap_conn *conn = hcon->l2cap_data;
7804 	struct hci_chan *hchan;
7805 
7806 	if (conn)
7807 		return conn;
7808 
7809 	hchan = hci_chan_create(hcon);
7810 	if (!hchan)
7811 		return NULL;
7812 
7813 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7814 	if (!conn) {
7815 		hci_chan_del(hchan);
7816 		return NULL;
7817 	}
7818 
7819 	kref_init(&conn->ref);
7820 	hcon->l2cap_data = conn;
7821 	conn->hcon = hci_conn_get(hcon);
7822 	conn->hchan = hchan;
7823 
7824 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7825 
7826 	switch (hcon->type) {
7827 	case LE_LINK:
7828 		if (hcon->hdev->le_mtu) {
7829 			conn->mtu = hcon->hdev->le_mtu;
7830 			break;
7831 		}
7832 		fallthrough;
7833 	default:
7834 		conn->mtu = hcon->hdev->acl_mtu;
7835 		break;
7836 	}
7837 
7838 	conn->feat_mask = 0;
7839 
7840 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7841 
7842 	if (hcon->type == ACL_LINK &&
7843 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7844 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7845 
7846 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7847 	    (bredr_sc_enabled(hcon->hdev) ||
7848 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7849 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7850 
7851 	mutex_init(&conn->ident_lock);
7852 	mutex_init(&conn->chan_lock);
7853 
7854 	INIT_LIST_HEAD(&conn->chan_l);
7855 	INIT_LIST_HEAD(&conn->users);
7856 
7857 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7858 
7859 	skb_queue_head_init(&conn->pending_rx);
7860 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7861 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7862 
7863 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7864 
7865 	return conn;
7866 }
7867 
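/* Illustrative PSM values: for BR/EDR, 0x0001 (SDP), 0x0003 (RFCOMM) and
 * 0x0019 (AVDTP) satisfy (psm & 0x0101) == 0x0001, while 0x0002 or 0x0101
 * do not; for LE destinations only PSMs up to 0x00ff are accepted.
 */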
7868 static bool is_valid_psm(u16 psm, u8 dst_type) {
7869 	if (!psm)
7870 		return false;
7871 
7872 	if (bdaddr_type_is_le(dst_type))
7873 		return (psm <= 0x00ff);
7874 
7875 	/* PSM must be odd and lsb of upper byte must be 0 */
7876 	return ((psm & 0x0101) == 0x0001);
7877 }
7878 
7879 struct l2cap_chan_data {
7880 	struct l2cap_chan *chan;
7881 	struct pid *pid;
7882 	int count;
7883 };
7884 
7885 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7886 {
7887 	struct l2cap_chan_data *d = data;
7888 	struct pid *pid;
7889 
7890 	if (chan == d->chan)
7891 		return;
7892 
7893 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7894 		return;
7895 
7896 	pid = chan->ops->get_peer_pid(chan);
7897 
7898 	/* Only count deferred channels with the same PID/PSM */
7899 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7900 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7901 		return;
7902 
7903 	d->count++;
7904 }
7905 
7906 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7907 		       bdaddr_t *dst, u8 dst_type)
7908 {
7909 	struct l2cap_conn *conn;
7910 	struct hci_conn *hcon;
7911 	struct hci_dev *hdev;
7912 	int err;
7913 
7914 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7915 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7916 
7917 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7918 	if (!hdev)
7919 		return -EHOSTUNREACH;
7920 
7921 	hci_dev_lock(hdev);
7922 
7923 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7924 	    chan->chan_type != L2CAP_CHAN_RAW) {
7925 		err = -EINVAL;
7926 		goto done;
7927 	}
7928 
7929 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7930 		err = -EINVAL;
7931 		goto done;
7932 	}
7933 
7934 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7935 		err = -EINVAL;
7936 		goto done;
7937 	}
7938 
7939 	switch (chan->mode) {
7940 	case L2CAP_MODE_BASIC:
7941 		break;
7942 	case L2CAP_MODE_LE_FLOWCTL:
7943 		break;
7944 	case L2CAP_MODE_EXT_FLOWCTL:
7945 		if (!enable_ecred) {
7946 			err = -EOPNOTSUPP;
7947 			goto done;
7948 		}
7949 		break;
7950 	case L2CAP_MODE_ERTM:
7951 	case L2CAP_MODE_STREAMING:
7952 		if (!disable_ertm)
7953 			break;
7954 		fallthrough;
7955 	default:
7956 		err = -EOPNOTSUPP;
7957 		goto done;
7958 	}
7959 
7960 	switch (chan->state) {
7961 	case BT_CONNECT:
7962 	case BT_CONNECT2:
7963 	case BT_CONFIG:
7964 		/* Already connecting */
7965 		err = 0;
7966 		goto done;
7967 
7968 	case BT_CONNECTED:
7969 		/* Already connected */
7970 		err = -EISCONN;
7971 		goto done;
7972 
7973 	case BT_OPEN:
7974 	case BT_BOUND:
7975 		/* Can connect */
7976 		break;
7977 
7978 	default:
7979 		err = -EBADFD;
7980 		goto done;
7981 	}
7982 
7983 	/* Set destination address and psm */
7984 	bacpy(&chan->dst, dst);
7985 	chan->dst_type = dst_type;
7986 
7987 	chan->psm = psm;
7988 	chan->dcid = cid;
7989 
7990 	if (bdaddr_type_is_le(dst_type)) {
7991 		/* Convert from L2CAP channel address type to HCI address type
7992 		 */
7993 		if (dst_type == BDADDR_LE_PUBLIC)
7994 			dst_type = ADDR_LE_DEV_PUBLIC;
7995 		else
7996 			dst_type = ADDR_LE_DEV_RANDOM;
7997 
7998 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7999 			hcon = hci_connect_le(hdev, dst, dst_type,
8000 					      chan->sec_level,
8001 					      HCI_LE_CONN_TIMEOUT,
8002 					      HCI_ROLE_SLAVE, NULL);
8003 		else
8004 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8005 						   chan->sec_level,
8006 						   HCI_LE_CONN_TIMEOUT,
8007 						   CONN_REASON_L2CAP_CHAN);
8008 
8009 	} else {
8010 		u8 auth_type = l2cap_get_auth_type(chan);
8011 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8012 				       CONN_REASON_L2CAP_CHAN);
8013 	}
8014 
8015 	if (IS_ERR(hcon)) {
8016 		err = PTR_ERR(hcon);
8017 		goto done;
8018 	}
8019 
8020 	conn = l2cap_conn_add(hcon);
8021 	if (!conn) {
8022 		hci_conn_drop(hcon);
8023 		err = -ENOMEM;
8024 		goto done;
8025 	}
8026 
8027 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8028 		struct l2cap_chan_data data;
8029 
8030 		data.chan = chan;
8031 		data.pid = chan->ops->get_peer_pid(chan);
8032 		data.count = 1;
8033 
8034 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8035 
8036 		/* Check that there aren't too many channels being connected */
8037 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8038 			hci_conn_drop(hcon);
8039 			err = -EPROTO;
8040 			goto done;
8041 		}
8042 	}
8043 
8044 	mutex_lock(&conn->chan_lock);
8045 	l2cap_chan_lock(chan);
8046 
8047 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8048 		hci_conn_drop(hcon);
8049 		err = -EBUSY;
8050 		goto chan_unlock;
8051 	}
8052 
8053 	/* Update source addr of the socket */
8054 	bacpy(&chan->src, &hcon->src);
8055 	chan->src_type = bdaddr_src_type(hcon);
8056 
8057 	__l2cap_chan_add(conn, chan);
8058 
8059 	/* l2cap_chan_add takes its own ref so we can drop this one */
8060 	hci_conn_drop(hcon);
8061 
8062 	l2cap_state_change(chan, BT_CONNECT);
8063 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8064 
8065 	/* Release chan->sport so that it can be reused by other
8066 	 * sockets (as it's only used for listening sockets).
8067 	 */
8068 	write_lock(&chan_list_lock);
8069 	chan->sport = 0;
8070 	write_unlock(&chan_list_lock);
8071 
8072 	if (hcon->state == BT_CONNECTED) {
8073 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8074 			__clear_chan_timer(chan);
8075 			if (l2cap_chan_check_security(chan, true))
8076 				l2cap_state_change(chan, BT_CONNECTED);
8077 		} else
8078 			l2cap_do_start(chan);
8079 	}
8080 
8081 	err = 0;
8082 
8083 chan_unlock:
8084 	l2cap_chan_unlock(chan);
8085 	mutex_unlock(&conn->chan_lock);
8086 done:
8087 	hci_dev_unlock(hdev);
8088 	hci_dev_put(hdev);
8089 	return err;
8090 }
8091 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8092 
8093 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8094 {
8095 	struct l2cap_conn *conn = chan->conn;
8096 	struct {
8097 		struct l2cap_ecred_reconf_req req;
8098 		__le16 scid;
8099 	} pdu;
8100 
8101 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8102 	pdu.req.mps = cpu_to_le16(chan->mps);
8103 	pdu.scid    = cpu_to_le16(chan->scid);
8104 
8105 	chan->ident = l2cap_get_ident(conn);
8106 
8107 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8108 		       sizeof(pdu), &pdu);
8109 }
8110 
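/* Caller sketch with hypothetical values: reconfiguration may only grow the
 * receive MTU, so l2cap_chan_reconfigure(chan, 1024) succeeds while
 * chan->imtu is 512, whereas a smaller value returns -EINVAL.
 */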
8111 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8112 {
8113 	if (chan->imtu > mtu)
8114 		return -EINVAL;
8115 
8116 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8117 
8118 	chan->imtu = mtu;
8119 
8120 	l2cap_ecred_reconfigure(chan);
8121 
8122 	return 0;
8123 }
8124 
8125 /* ---- L2CAP interface with lower layer (HCI) ---- */
8126 
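/* lm1 accumulates the link mode of listeners bound to this adapter's own
 * address, lm2 of listeners bound to BDADDR_ANY; an exact match takes
 * precedence over the wildcard result.
 */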
8127 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8128 {
8129 	int exact = 0, lm1 = 0, lm2 = 0;
8130 	struct l2cap_chan *c;
8131 
8132 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8133 
8134 	/* Find listening sockets and check their link_mode */
8135 	read_lock(&chan_list_lock);
8136 	list_for_each_entry(c, &chan_list, global_l) {
8137 		if (c->state != BT_LISTEN)
8138 			continue;
8139 
8140 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8141 			lm1 |= HCI_LM_ACCEPT;
8142 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8143 				lm1 |= HCI_LM_MASTER;
8144 			exact++;
8145 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8146 			lm2 |= HCI_LM_ACCEPT;
8147 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8148 				lm2 |= HCI_LM_MASTER;
8149 		}
8150 	}
8151 	read_unlock(&chan_list_lock);
8152 
8153 	return exact ? lm1 : lm2;
8154 }
8155 
8156 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
8157  * either from an existing channel in the list or from the beginning of
8158  * the global list (by passing NULL as the first parameter).
8159  */
8160 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8161 						  struct hci_conn *hcon)
8162 {
8163 	u8 src_type = bdaddr_src_type(hcon);
8164 
8165 	read_lock(&chan_list_lock);
8166 
8167 	if (c)
8168 		c = list_next_entry(c, global_l);
8169 	else
8170 		c = list_entry(chan_list.next, typeof(*c), global_l);
8171 
8172 	list_for_each_entry_from(c, &chan_list, global_l) {
8173 		if (c->chan_type != L2CAP_CHAN_FIXED)
8174 			continue;
8175 		if (c->state != BT_LISTEN)
8176 			continue;
8177 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8178 			continue;
8179 		if (src_type != c->src_type)
8180 			continue;
8181 
8182 		c = l2cap_chan_hold_unless_zero(c);
8183 		read_unlock(&chan_list_lock);
8184 		return c;
8185 	}
8186 
8187 	read_unlock(&chan_list_lock);
8188 
8189 	return NULL;
8190 }
8191 
8192 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8193 {
8194 	struct hci_dev *hdev = hcon->hdev;
8195 	struct l2cap_conn *conn;
8196 	struct l2cap_chan *pchan;
8197 	u8 dst_type;
8198 
8199 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8200 		return;
8201 
8202 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8203 
8204 	if (status) {
8205 		l2cap_conn_del(hcon, bt_to_errno(status));
8206 		return;
8207 	}
8208 
8209 	conn = l2cap_conn_add(hcon);
8210 	if (!conn)
8211 		return;
8212 
8213 	dst_type = bdaddr_dst_type(hcon);
8214 
8215 	/* If device is blocked, do not create channels for it */
8216 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8217 		return;
8218 
8219 	/* Find fixed channels and notify them of the new connection. We
8220 	 * use multiple individual lookups, continuing each time where
8221 	 * we left off, because the list lock would prevent calling the
8222 	 * potentially sleeping l2cap_chan_lock() function.
8223 	 */
8224 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8225 	while (pchan) {
8226 		struct l2cap_chan *chan, *next;
8227 
8228 		/* Client fixed channels should override server ones */
8229 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8230 			goto next;
8231 
8232 		l2cap_chan_lock(pchan);
8233 		chan = pchan->ops->new_connection(pchan);
8234 		if (chan) {
8235 			bacpy(&chan->src, &hcon->src);
8236 			bacpy(&chan->dst, &hcon->dst);
8237 			chan->src_type = bdaddr_src_type(hcon);
8238 			chan->dst_type = dst_type;
8239 
8240 			__l2cap_chan_add(conn, chan);
8241 		}
8242 
8243 		l2cap_chan_unlock(pchan);
8244 next:
8245 		next = l2cap_global_fixed_chan(pchan, hcon);
8246 		l2cap_chan_put(pchan);
8247 		pchan = next;
8248 	}
8249 
8250 	l2cap_conn_ready(conn);
8251 }
8252 
8253 int l2cap_disconn_ind(struct hci_conn *hcon)
8254 {
8255 	struct l2cap_conn *conn = hcon->l2cap_data;
8256 
8257 	BT_DBG("hcon %p", hcon);
8258 
8259 	if (!conn)
8260 		return HCI_ERROR_REMOTE_USER_TERM;
8261 	return conn->disc_reason;
8262 }
8263 
8264 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8265 {
8266 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8267 		return;
8268 
8269 	BT_DBG("hcon %p reason %d", hcon, reason);
8270 
8271 	l2cap_conn_del(hcon, bt_to_errno(reason));
8272 }
8273 
8274 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8275 {
8276 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8277 		return;
8278 
8279 	if (encrypt == 0x00) {
8280 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8281 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8282 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8283 			   chan->sec_level == BT_SECURITY_FIPS)
8284 			l2cap_chan_close(chan, ECONNREFUSED);
8285 	} else {
8286 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8287 			__clear_chan_timer(chan);
8288 	}
8289 }
8290 
8291 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8292 {
8293 	struct l2cap_conn *conn = hcon->l2cap_data;
8294 	struct l2cap_chan *chan;
8295 
8296 	if (!conn)
8297 		return;
8298 
8299 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8300 
8301 	mutex_lock(&conn->chan_lock);
8302 
8303 	list_for_each_entry(chan, &conn->chan_l, list) {
8304 		l2cap_chan_lock(chan);
8305 
8306 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8307 		       state_to_string(chan->state));
8308 
8309 		if (chan->scid == L2CAP_CID_A2MP) {
8310 			l2cap_chan_unlock(chan);
8311 			continue;
8312 		}
8313 
8314 		if (!status && encrypt)
8315 			chan->sec_level = hcon->sec_level;
8316 
8317 		if (!__l2cap_no_conn_pending(chan)) {
8318 			l2cap_chan_unlock(chan);
8319 			continue;
8320 		}
8321 
8322 		if (!status && (chan->state == BT_CONNECTED ||
8323 				chan->state == BT_CONFIG)) {
8324 			chan->ops->resume(chan);
8325 			l2cap_check_encryption(chan, encrypt);
8326 			l2cap_chan_unlock(chan);
8327 			continue;
8328 		}
8329 
8330 		if (chan->state == BT_CONNECT) {
8331 			if (!status && l2cap_check_enc_key_size(hcon))
8332 				l2cap_start_connection(chan);
8333 			else
8334 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8335 		} else if (chan->state == BT_CONNECT2 &&
8336 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8337 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8338 			struct l2cap_conn_rsp rsp;
8339 			__u16 res, stat;
8340 
8341 			if (!status && l2cap_check_enc_key_size(hcon)) {
8342 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8343 					res = L2CAP_CR_PEND;
8344 					stat = L2CAP_CS_AUTHOR_PEND;
8345 					chan->ops->defer(chan);
8346 				} else {
8347 					l2cap_state_change(chan, BT_CONFIG);
8348 					res = L2CAP_CR_SUCCESS;
8349 					stat = L2CAP_CS_NO_INFO;
8350 				}
8351 			} else {
8352 				l2cap_state_change(chan, BT_DISCONN);
8353 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8354 				res = L2CAP_CR_SEC_BLOCK;
8355 				stat = L2CAP_CS_NO_INFO;
8356 			}
8357 
8358 			rsp.scid   = cpu_to_le16(chan->dcid);
8359 			rsp.dcid   = cpu_to_le16(chan->scid);
8360 			rsp.result = cpu_to_le16(res);
8361 			rsp.status = cpu_to_le16(stat);
8362 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8363 				       sizeof(rsp), &rsp);
8364 
8365 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8366 			    res == L2CAP_CR_SUCCESS) {
8367 				char buf[128];
8368 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8369 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8370 					       L2CAP_CONF_REQ,
8371 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8372 					       buf);
8373 				chan->num_conf_req++;
8374 			}
8375 		}
8376 
8377 		l2cap_chan_unlock(chan);
8378 	}
8379 
8380 	mutex_unlock(&conn->chan_lock);
8381 }
8382 
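/* Reassembly example with assumed sizes: a 1000-byte L2CAP frame arriving
 * as a 672-byte ACL_START fragment followed by a 328-byte ACL_CONT fragment
 * is copied into conn->rx_skb; rx_len tracks the bytes still missing and the
 * frame is handed to l2cap_recv_frame() once it reaches zero.
 */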
8383 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8384 {
8385 	struct l2cap_conn *conn = hcon->l2cap_data;
8386 	struct l2cap_hdr *hdr;
8387 	int len;
8388 
8389 	/* For AMP controller do not create l2cap conn */
8390 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8391 		goto drop;
8392 
8393 	if (!conn)
8394 		conn = l2cap_conn_add(hcon);
8395 
8396 	if (!conn)
8397 		goto drop;
8398 
8399 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8400 
8401 	switch (flags) {
8402 	case ACL_START:
8403 	case ACL_START_NO_FLUSH:
8404 	case ACL_COMPLETE:
8405 		if (conn->rx_len) {
8406 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8407 			kfree_skb(conn->rx_skb);
8408 			conn->rx_skb = NULL;
8409 			conn->rx_len = 0;
8410 			l2cap_conn_unreliable(conn, ECOMM);
8411 		}
8412 
8413 		/* A start fragment always begins with the Basic L2CAP header */
8414 		if (skb->len < L2CAP_HDR_SIZE) {
8415 			BT_ERR("Frame is too short (len %d)", skb->len);
8416 			l2cap_conn_unreliable(conn, ECOMM);
8417 			goto drop;
8418 		}
8419 
8420 		hdr = (struct l2cap_hdr *) skb->data;
8421 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8422 
8423 		if (len == skb->len) {
8424 			/* Complete frame received */
8425 			l2cap_recv_frame(conn, skb);
8426 			return;
8427 		}
8428 
8429 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8430 
8431 		if (skb->len > len) {
8432 			BT_ERR("Frame is too long (len %d, expected len %d)",
8433 			       skb->len, len);
8434 			l2cap_conn_unreliable(conn, ECOMM);
8435 			goto drop;
8436 		}
8437 
8438 		/* Allocate skb for the complete frame (with header) */
8439 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8440 		if (!conn->rx_skb)
8441 			goto drop;
8442 
8443 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8444 					  skb->len);
8445 		conn->rx_len = len - skb->len;
8446 		break;
8447 
8448 	case ACL_CONT:
8449 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8450 
8451 		if (!conn->rx_len) {
8452 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8453 			l2cap_conn_unreliable(conn, ECOMM);
8454 			goto drop;
8455 		}
8456 
8457 		if (skb->len > conn->rx_len) {
8458 			BT_ERR("Fragment is too long (len %d, expected %d)",
8459 			       skb->len, conn->rx_len);
8460 			kfree_skb(conn->rx_skb);
8461 			conn->rx_skb = NULL;
8462 			conn->rx_len = 0;
8463 			l2cap_conn_unreliable(conn, ECOMM);
8464 			goto drop;
8465 		}
8466 
8467 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8468 					  skb->len);
8469 		conn->rx_len -= skb->len;
8470 
8471 		if (!conn->rx_len) {
8472 			/* Complete frame received. l2cap_recv_frame
8473 			 * takes ownership of the skb so set the global
8474 			 * rx_skb pointer to NULL first.
8475 			 */
8476 			struct sk_buff *rx_skb = conn->rx_skb;
8477 			conn->rx_skb = NULL;
8478 			l2cap_recv_frame(conn, rx_skb);
8479 		}
8480 		break;
8481 	}
8482 
8483 drop:
8484 	kfree_skb(skb);
8485 }
8486 
8487 static struct hci_cb l2cap_cb = {
8488 	.name		= "L2CAP",
8489 	.connect_cfm	= l2cap_connect_cfm,
8490 	.disconn_cfm	= l2cap_disconn_cfm,
8491 	.security_cfm	= l2cap_security_cfm,
8492 };
8493 
8494 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8495 {
8496 	struct l2cap_chan *c;
8497 
8498 	read_lock(&chan_list_lock);
8499 
8500 	list_for_each_entry(c, &chan_list, global_l) {
8501 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8502 			   &c->src, c->src_type, &c->dst, c->dst_type,
8503 			   c->state, __le16_to_cpu(c->psm),
8504 			   c->scid, c->dcid, c->imtu, c->omtu,
8505 			   c->sec_level, c->mode);
8506 	}
8507 
8508 	read_unlock(&chan_list_lock);
8509 
8510 	return 0;
8511 }
8512 
8513 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8514 
8515 static struct dentry *l2cap_debugfs;
8516 
8517 int __init l2cap_init(void)
8518 {
8519 	int err;
8520 
8521 	err = l2cap_init_sockets();
8522 	if (err < 0)
8523 		return err;
8524 
8525 	hci_register_cb(&l2cap_cb);
8526 
8527 	if (IS_ERR_OR_NULL(bt_debugfs))
8528 		return 0;
8529 
8530 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8531 					    NULL, &l2cap_debugfs_fops);
8532 
8533 	return 0;
8534 }
8535 
8536 void l2cap_exit(void)
8537 {
8538 	debugfs_remove(l2cap_debugfs);
8539 	hci_unregister_cb(&l2cap_cb);
8540 	l2cap_cleanup_sockets();
8541 }
8542 
8543 module_param(disable_ertm, bool, 0644);
8544 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8545 
8546 module_param(enable_ecred, bool, 0644);
8547 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
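/* Both parameters can also be changed at runtime; with 0644 permissions they
 * are exposed under /sys/module/bluetooth/parameters/ (assuming L2CAP is
 * built into the bluetooth module), e.g.:
 *
 *   echo 1 > /sys/module/bluetooth/parameters/enable_ecred
 */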
8548