1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a locked channel with its reference count incremented.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a locked channel with its reference count incremented.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
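		/* BR/EDR dynamic PSMs are odd values, so the auto-allocation
		 * below probes them in steps of two, while LE dynamic PSMs
		 * (0x0080-0x00ff) can use every value.
		 */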
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) into a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
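/* Illustrative sketch (not called anywhere in the driver): a typical
 * combination of the seq_list helpers above.  The window size of 64 is an
 * arbitrary example value; it is rounded up to a power of two internally.
 */
static __maybe_unused void l2cap_seq_list_example(void)
{
	struct l2cap_seq_list list;

	if (l2cap_seq_list_init(&list, 64))
		return;

	/* Queue two sequence numbers at the tail in constant time */
	l2cap_seq_list_append(&list, 5);
	l2cap_seq_list_append(&list, 9);

	/* Constant-time membership check via the backing array */
	if (l2cap_seq_list_contains(&list, 5)) {
		/* Pop from the head in FIFO order: 5 first, then 9 */
		while (list.head != L2CAP_SEQ_LIST_CLEAR)
			l2cap_seq_list_pop(&list);
	}

	l2cap_seq_list_free(&list);
}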
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	if (!conn)
439 		return;
440 
441 	mutex_lock(&conn->chan_lock);
442 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
443 	 * this work. No need to call l2cap_chan_hold(chan) here again.
444 	 */
445 	l2cap_chan_lock(chan);
446 
447 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
448 		reason = ECONNREFUSED;
449 	else if (chan->state == BT_CONNECT &&
450 		 chan->sec_level != BT_SECURITY_SDP)
451 		reason = ECONNREFUSED;
452 	else
453 		reason = ETIMEDOUT;
454 
455 	l2cap_chan_close(chan, reason);
456 
457 	chan->ops->close(chan);
458 
459 	l2cap_chan_unlock(chan);
460 	l2cap_chan_put(chan);
461 
462 	mutex_unlock(&conn->chan_lock);
463 }
464 
465 struct l2cap_chan *l2cap_chan_create(void)
466 {
467 	struct l2cap_chan *chan;
468 
469 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 	if (!chan)
471 		return NULL;
472 
473 	skb_queue_head_init(&chan->tx_q);
474 	skb_queue_head_init(&chan->srej_q);
475 	mutex_init(&chan->lock);
476 
477 	/* Set default lock nesting level */
478 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
479 
480 	write_lock(&chan_list_lock);
481 	list_add(&chan->global_l, &chan_list);
482 	write_unlock(&chan_list_lock);
483 
484 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
485 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
486 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
487 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
488 
489 	chan->state = BT_OPEN;
490 
491 	kref_init(&chan->kref);
492 
493 	/* This flag is cleared in l2cap_chan_ready() */
494 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
495 
496 	BT_DBG("chan %p", chan);
497 
498 	return chan;
499 }
500 EXPORT_SYMBOL_GPL(l2cap_chan_create);
501 
502 static void l2cap_chan_destroy(struct kref *kref)
503 {
504 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
505 
506 	BT_DBG("chan %p", chan);
507 
508 	write_lock(&chan_list_lock);
509 	list_del(&chan->global_l);
510 	write_unlock(&chan_list_lock);
511 
512 	kfree(chan);
513 }
514 
515 void l2cap_chan_hold(struct l2cap_chan *c)
516 {
517 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
518 
519 	kref_get(&c->kref);
520 }
521 
522 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
523 {
524 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
525 
526 	if (!kref_get_unless_zero(&c->kref))
527 		return NULL;
528 
529 	return c;
530 }
531 
532 void l2cap_chan_put(struct l2cap_chan *c)
533 {
534 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
535 
536 	kref_put(&c->kref, l2cap_chan_destroy);
537 }
538 EXPORT_SYMBOL_GPL(l2cap_chan_put);
539 
540 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
541 {
542 	chan->fcs  = L2CAP_FCS_CRC16;
543 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
544 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
545 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->remote_max_tx = chan->max_tx;
547 	chan->remote_tx_win = chan->tx_win;
548 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
549 	chan->sec_level = BT_SECURITY_LOW;
550 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
551 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
552 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
553 
554 	chan->conf_state = 0;
555 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
556 
557 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
558 }
559 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
560 
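/* Illustrative sketch (not called anywhere in the driver): one way a channel
 * owner such as a socket backend might combine l2cap_chan_create(),
 * l2cap_chan_set_defaults() and l2cap_add_psm().  The helper name is
 * hypothetical; passing psm == 0 requests auto-allocation as implemented
 * above.
 */
static __maybe_unused struct l2cap_chan *l2cap_example_listen_chan(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();		/* refcount 1, state BT_OPEN */
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);		/* FCS, tx window, timeouts */

	/* Reserve a dynamic PSM on the all-zero (BDADDR_ANY) source address */
	if (l2cap_add_psm(chan, BDADDR_ANY, 0)) {
		l2cap_chan_put(chan);
		return NULL;
	}

	return chan;
}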
561 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
562 {
563 	chan->sdu = NULL;
564 	chan->sdu_last_frag = NULL;
565 	chan->sdu_len = 0;
566 	chan->tx_credits = tx_credits;
567 	/* Derive MPS from connection MTU to stop HCI fragmentation */
568 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
569 	/* Give enough credits for a full packet */
570 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
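	/* Worked example with illustrative values: imtu 512 and a connection
	 * MTU of 251 give mps = min(512, 251 - 4) = 247 and
	 * rx_credits = 512 / 247 + 1 = 3, i.e. enough credits for the peer
	 * to send one full SDU.
	 */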
571 
572 	skb_queue_head_init(&chan->tx_q);
573 }
574 
575 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
576 {
577 	l2cap_le_flowctl_init(chan, tx_credits);
578 
579 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
580 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
581 		chan->mps = L2CAP_ECRED_MIN_MPS;
582 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
583 	}
584 }
585 
586 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
587 {
588 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
589 	       __le16_to_cpu(chan->psm), chan->dcid);
590 
591 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 
593 	chan->conn = conn;
594 
595 	switch (chan->chan_type) {
596 	case L2CAP_CHAN_CONN_ORIENTED:
597 		/* Alloc CID for connection-oriented socket */
598 		chan->scid = l2cap_alloc_cid(conn);
599 		if (conn->hcon->type == ACL_LINK)
600 			chan->omtu = L2CAP_DEFAULT_MTU;
601 		break;
602 
603 	case L2CAP_CHAN_CONN_LESS:
604 		/* Connectionless socket */
605 		chan->scid = L2CAP_CID_CONN_LESS;
606 		chan->dcid = L2CAP_CID_CONN_LESS;
607 		chan->omtu = L2CAP_DEFAULT_MTU;
608 		break;
609 
610 	case L2CAP_CHAN_FIXED:
611 		/* Caller will set CID and CID specific MTU values */
612 		break;
613 
614 	default:
615 		/* Raw socket can send/recv signalling messages only */
616 		chan->scid = L2CAP_CID_SIGNALING;
617 		chan->dcid = L2CAP_CID_SIGNALING;
618 		chan->omtu = L2CAP_DEFAULT_MTU;
619 	}
620 
621 	chan->local_id		= L2CAP_BESTEFFORT_ID;
622 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
623 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
624 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
625 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
626 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
627 
628 	l2cap_chan_hold(chan);
629 
630 	/* Only keep a reference for fixed channels if they requested it */
631 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
632 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
633 		hci_conn_hold(conn->hcon);
634 
635 	list_add(&chan->list, &conn->chan_l);
636 }
637 
638 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
639 {
640 	mutex_lock(&conn->chan_lock);
641 	__l2cap_chan_add(conn, chan);
642 	mutex_unlock(&conn->chan_lock);
643 }
644 
645 void l2cap_chan_del(struct l2cap_chan *chan, int err)
646 {
647 	struct l2cap_conn *conn = chan->conn;
648 
649 	__clear_chan_timer(chan);
650 
651 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
652 	       state_to_string(chan->state));
653 
654 	chan->ops->teardown(chan, err);
655 
656 	if (conn) {
657 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
658 		/* Delete from channel list */
659 		list_del(&chan->list);
660 
661 		l2cap_chan_put(chan);
662 
663 		chan->conn = NULL;
664 
665 		/* Reference was only held for non-fixed channels or
666 		 * fixed channels that explicitly requested it using the
667 		 * FLAG_HOLD_HCI_CONN flag.
668 		 */
669 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
670 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
671 			hci_conn_drop(conn->hcon);
672 
673 		if (mgr && mgr->bredr_chan == chan)
674 			mgr->bredr_chan = NULL;
675 	}
676 
677 	if (chan->hs_hchan) {
678 		struct hci_chan *hs_hchan = chan->hs_hchan;
679 
680 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
681 		amp_disconnect_logical_link(hs_hchan);
682 	}
683 
684 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
685 		return;
686 
687 	switch (chan->mode) {
688 	case L2CAP_MODE_BASIC:
689 		break;
690 
691 	case L2CAP_MODE_LE_FLOWCTL:
692 	case L2CAP_MODE_EXT_FLOWCTL:
693 		skb_queue_purge(&chan->tx_q);
694 		break;
695 
696 	case L2CAP_MODE_ERTM:
697 		__clear_retrans_timer(chan);
698 		__clear_monitor_timer(chan);
699 		__clear_ack_timer(chan);
700 
701 		skb_queue_purge(&chan->srej_q);
702 
703 		l2cap_seq_list_free(&chan->srej_list);
704 		l2cap_seq_list_free(&chan->retrans_list);
705 		fallthrough;
706 
707 	case L2CAP_MODE_STREAMING:
708 		skb_queue_purge(&chan->tx_q);
709 		break;
710 	}
711 
712 	return;
713 }
714 EXPORT_SYMBOL_GPL(l2cap_chan_del);
715 
716 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
717 				 l2cap_chan_func_t func, void *data)
718 {
719 	struct l2cap_chan *chan, *l;
720 
721 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
722 		if (chan->ident == id)
723 			func(chan, data);
724 	}
725 }
726 
727 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
728 			      void *data)
729 {
730 	struct l2cap_chan *chan;
731 
732 	list_for_each_entry(chan, &conn->chan_l, list) {
733 		func(chan, data);
734 	}
735 }
736 
737 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
738 		     void *data)
739 {
740 	if (!conn)
741 		return;
742 
743 	mutex_lock(&conn->chan_lock);
744 	__l2cap_chan_list(conn, func, data);
745 	mutex_unlock(&conn->chan_lock);
746 }
747 
748 EXPORT_SYMBOL_GPL(l2cap_chan_list);
749 
750 static void l2cap_conn_update_id_addr(struct work_struct *work)
751 {
752 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
753 					       id_addr_update_work);
754 	struct hci_conn *hcon = conn->hcon;
755 	struct l2cap_chan *chan;
756 
757 	mutex_lock(&conn->chan_lock);
758 
759 	list_for_each_entry(chan, &conn->chan_l, list) {
760 		l2cap_chan_lock(chan);
761 		bacpy(&chan->dst, &hcon->dst);
762 		chan->dst_type = bdaddr_dst_type(hcon);
763 		l2cap_chan_unlock(chan);
764 	}
765 
766 	mutex_unlock(&conn->chan_lock);
767 }
768 
769 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
770 {
771 	struct l2cap_conn *conn = chan->conn;
772 	struct l2cap_le_conn_rsp rsp;
773 	u16 result;
774 
775 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
776 		result = L2CAP_CR_LE_AUTHORIZATION;
777 	else
778 		result = L2CAP_CR_LE_BAD_PSM;
779 
780 	l2cap_state_change(chan, BT_DISCONN);
781 
782 	rsp.dcid    = cpu_to_le16(chan->scid);
783 	rsp.mtu     = cpu_to_le16(chan->imtu);
784 	rsp.mps     = cpu_to_le16(chan->mps);
785 	rsp.credits = cpu_to_le16(chan->rx_credits);
786 	rsp.result  = cpu_to_le16(result);
787 
788 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
789 		       &rsp);
790 }
791 
792 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
793 {
794 	l2cap_state_change(chan, BT_DISCONN);
795 
796 	__l2cap_ecred_conn_rsp_defer(chan);
797 }
798 
799 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
800 {
801 	struct l2cap_conn *conn = chan->conn;
802 	struct l2cap_conn_rsp rsp;
803 	u16 result;
804 
805 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
806 		result = L2CAP_CR_SEC_BLOCK;
807 	else
808 		result = L2CAP_CR_BAD_PSM;
809 
810 	l2cap_state_change(chan, BT_DISCONN);
811 
812 	rsp.scid   = cpu_to_le16(chan->dcid);
813 	rsp.dcid   = cpu_to_le16(chan->scid);
814 	rsp.result = cpu_to_le16(result);
815 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
816 
817 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
818 }
819 
820 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
821 {
822 	struct l2cap_conn *conn = chan->conn;
823 
824 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
825 
826 	switch (chan->state) {
827 	case BT_LISTEN:
828 		chan->ops->teardown(chan, 0);
829 		break;
830 
831 	case BT_CONNECTED:
832 	case BT_CONFIG:
833 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
835 			l2cap_send_disconn_req(chan, reason);
836 		} else
837 			l2cap_chan_del(chan, reason);
838 		break;
839 
840 	case BT_CONNECT2:
841 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
842 			if (conn->hcon->type == ACL_LINK)
843 				l2cap_chan_connect_reject(chan);
844 			else if (conn->hcon->type == LE_LINK) {
845 				switch (chan->mode) {
846 				case L2CAP_MODE_LE_FLOWCTL:
847 					l2cap_chan_le_connect_reject(chan);
848 					break;
849 				case L2CAP_MODE_EXT_FLOWCTL:
850 					l2cap_chan_ecred_connect_reject(chan);
851 					return;
852 				}
853 			}
854 		}
855 
856 		l2cap_chan_del(chan, reason);
857 		break;
858 
859 	case BT_CONNECT:
860 	case BT_DISCONN:
861 		l2cap_chan_del(chan, reason);
862 		break;
863 
864 	default:
865 		chan->ops->teardown(chan, 0);
866 		break;
867 	}
868 }
869 EXPORT_SYMBOL(l2cap_chan_close);
870 
871 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
872 {
873 	switch (chan->chan_type) {
874 	case L2CAP_CHAN_RAW:
875 		switch (chan->sec_level) {
876 		case BT_SECURITY_HIGH:
877 		case BT_SECURITY_FIPS:
878 			return HCI_AT_DEDICATED_BONDING_MITM;
879 		case BT_SECURITY_MEDIUM:
880 			return HCI_AT_DEDICATED_BONDING;
881 		default:
882 			return HCI_AT_NO_BONDING;
883 		}
884 		break;
885 	case L2CAP_CHAN_CONN_LESS:
886 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
887 			if (chan->sec_level == BT_SECURITY_LOW)
888 				chan->sec_level = BT_SECURITY_SDP;
889 		}
890 		if (chan->sec_level == BT_SECURITY_HIGH ||
891 		    chan->sec_level == BT_SECURITY_FIPS)
892 			return HCI_AT_NO_BONDING_MITM;
893 		else
894 			return HCI_AT_NO_BONDING;
895 		break;
896 	case L2CAP_CHAN_CONN_ORIENTED:
897 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
898 			if (chan->sec_level == BT_SECURITY_LOW)
899 				chan->sec_level = BT_SECURITY_SDP;
900 
901 			if (chan->sec_level == BT_SECURITY_HIGH ||
902 			    chan->sec_level == BT_SECURITY_FIPS)
903 				return HCI_AT_NO_BONDING_MITM;
904 			else
905 				return HCI_AT_NO_BONDING;
906 		}
907 		fallthrough;
908 
909 	default:
910 		switch (chan->sec_level) {
911 		case BT_SECURITY_HIGH:
912 		case BT_SECURITY_FIPS:
913 			return HCI_AT_GENERAL_BONDING_MITM;
914 		case BT_SECURITY_MEDIUM:
915 			return HCI_AT_GENERAL_BONDING;
916 		default:
917 			return HCI_AT_NO_BONDING;
918 		}
919 		break;
920 	}
921 }
922 
923 /* Service level security */
924 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
925 {
926 	struct l2cap_conn *conn = chan->conn;
927 	__u8 auth_type;
928 
929 	if (conn->hcon->type == LE_LINK)
930 		return smp_conn_security(conn->hcon, chan->sec_level);
931 
932 	auth_type = l2cap_get_auth_type(chan);
933 
934 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
935 				 initiator);
936 }
937 
938 static u8 l2cap_get_ident(struct l2cap_conn *conn)
939 {
940 	u8 id;
941 
942 	/* Get next available identifier.
943 	 *    1 - 128 are used by kernel.
944 	 *  129 - 199 are reserved.
945 	 *  200 - 254 are used by utilities like l2ping, etc.
946 	 */
947 
948 	mutex_lock(&conn->ident_lock);
949 
950 	if (++conn->tx_ident > 128)
951 		conn->tx_ident = 1;
952 
953 	id = conn->tx_ident;
954 
955 	mutex_unlock(&conn->ident_lock);
956 
957 	return id;
958 }
959 
960 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
961 			   void *data)
962 {
963 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
964 	u8 flags;
965 
966 	BT_DBG("code 0x%2.2x", code);
967 
968 	if (!skb)
969 		return;
970 
971 	/* Use NO_FLUSH if supported or we have an LE link (which does
972 	 * not support auto-flushing packets) */
973 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
974 	    conn->hcon->type == LE_LINK)
975 		flags = ACL_START_NO_FLUSH;
976 	else
977 		flags = ACL_START;
978 
979 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
980 	skb->priority = HCI_PRIO_MAX;
981 
982 	hci_send_acl(conn->hchan, skb, flags);
983 }
984 
985 static bool __chan_is_moving(struct l2cap_chan *chan)
986 {
987 	return chan->move_state != L2CAP_MOVE_STABLE &&
988 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
989 }
990 
991 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
992 {
993 	struct hci_conn *hcon = chan->conn->hcon;
994 	u16 flags;
995 
996 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
997 	       skb->priority);
998 
999 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
1000 		if (chan->hs_hchan)
1001 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1002 		else
1003 			kfree_skb(skb);
1004 
1005 		return;
1006 	}
1007 
1008 	/* Use NO_FLUSH for LE links (where this is the only option) or
1009 	 * if the BR/EDR link supports it and flushing has not been
1010 	 * explicitly requested (through FLAG_FLUSHABLE).
1011 	 */
1012 	if (hcon->type == LE_LINK ||
1013 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1014 	     lmp_no_flush_capable(hcon->hdev)))
1015 		flags = ACL_START_NO_FLUSH;
1016 	else
1017 		flags = ACL_START;
1018 
1019 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1020 	hci_send_acl(chan->conn->hchan, skb, flags);
1021 }
1022 
1023 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1024 {
1025 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1026 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1027 
1028 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1029 		/* S-Frame */
1030 		control->sframe = 1;
1031 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1032 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1033 
1034 		control->sar = 0;
1035 		control->txseq = 0;
1036 	} else {
1037 		/* I-Frame */
1038 		control->sframe = 0;
1039 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1040 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1041 
1042 		control->poll = 0;
1043 		control->super = 0;
1044 	}
1045 }
1046 
1047 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1048 {
1049 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1050 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1051 
1052 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1053 		/* S-Frame */
1054 		control->sframe = 1;
1055 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1056 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1057 
1058 		control->sar = 0;
1059 		control->txseq = 0;
1060 	} else {
1061 		/* I-Frame */
1062 		control->sframe = 0;
1063 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1064 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1065 
1066 		control->poll = 0;
1067 		control->super = 0;
1068 	}
1069 }
1070 
1071 static inline void __unpack_control(struct l2cap_chan *chan,
1072 				    struct sk_buff *skb)
1073 {
1074 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1075 		__unpack_extended_control(get_unaligned_le32(skb->data),
1076 					  &bt_cb(skb)->l2cap);
1077 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1078 	} else {
1079 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1080 					  &bt_cb(skb)->l2cap);
1081 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1082 	}
1083 }
1084 
1085 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1086 {
1087 	u32 packed;
1088 
1089 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1090 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1091 
1092 	if (control->sframe) {
1093 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1094 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1095 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1096 	} else {
1097 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1098 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1099 	}
1100 
1101 	return packed;
1102 }
1103 
1104 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1105 {
1106 	u16 packed;
1107 
1108 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1109 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1110 
1111 	if (control->sframe) {
1112 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1113 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1114 		packed |= L2CAP_CTRL_FRAME_TYPE;
1115 	} else {
1116 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1117 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1118 	}
1119 
1120 	return packed;
1121 }
1122 
1123 static inline void __pack_control(struct l2cap_chan *chan,
1124 				  struct l2cap_ctrl *control,
1125 				  struct sk_buff *skb)
1126 {
1127 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1128 		put_unaligned_le32(__pack_extended_control(control),
1129 				   skb->data + L2CAP_HDR_SIZE);
1130 	} else {
1131 		put_unaligned_le16(__pack_enhanced_control(control),
1132 				   skb->data + L2CAP_HDR_SIZE);
1133 	}
1134 }
1135 
1136 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1137 {
1138 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1139 		return L2CAP_EXT_HDR_SIZE;
1140 	else
1141 		return L2CAP_ENH_HDR_SIZE;
1142 }
1143 
1144 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1145 					       u32 control)
1146 {
1147 	struct sk_buff *skb;
1148 	struct l2cap_hdr *lh;
1149 	int hlen = __ertm_hdr_size(chan);
1150 
1151 	if (chan->fcs == L2CAP_FCS_CRC16)
1152 		hlen += L2CAP_FCS_SIZE;
1153 
1154 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1155 
1156 	if (!skb)
1157 		return ERR_PTR(-ENOMEM);
1158 
1159 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1160 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1161 	lh->cid = cpu_to_le16(chan->dcid);
1162 
1163 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1164 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1165 	else
1166 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1167 
1168 	if (chan->fcs == L2CAP_FCS_CRC16) {
1169 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1170 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1171 	}
1172 
1173 	skb->priority = HCI_PRIO_MAX;
1174 	return skb;
1175 }
1176 
1177 static void l2cap_send_sframe(struct l2cap_chan *chan,
1178 			      struct l2cap_ctrl *control)
1179 {
1180 	struct sk_buff *skb;
1181 	u32 control_field;
1182 
1183 	BT_DBG("chan %p, control %p", chan, control);
1184 
1185 	if (!control->sframe)
1186 		return;
1187 
1188 	if (__chan_is_moving(chan))
1189 		return;
1190 
1191 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1192 	    !control->poll)
1193 		control->final = 1;
1194 
1195 	if (control->super == L2CAP_SUPER_RR)
1196 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1197 	else if (control->super == L2CAP_SUPER_RNR)
1198 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1199 
1200 	if (control->super != L2CAP_SUPER_SREJ) {
1201 		chan->last_acked_seq = control->reqseq;
1202 		__clear_ack_timer(chan);
1203 	}
1204 
1205 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1206 	       control->final, control->poll, control->super);
1207 
1208 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1209 		control_field = __pack_extended_control(control);
1210 	else
1211 		control_field = __pack_enhanced_control(control);
1212 
1213 	skb = l2cap_create_sframe_pdu(chan, control_field);
1214 	if (!IS_ERR(skb))
1215 		l2cap_do_send(chan, skb);
1216 }
1217 
1218 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1219 {
1220 	struct l2cap_ctrl control;
1221 
1222 	BT_DBG("chan %p, poll %d", chan, poll);
1223 
1224 	memset(&control, 0, sizeof(control));
1225 	control.sframe = 1;
1226 	control.poll = poll;
1227 
1228 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1229 		control.super = L2CAP_SUPER_RNR;
1230 	else
1231 		control.super = L2CAP_SUPER_RR;
1232 
1233 	control.reqseq = chan->buffer_seq;
1234 	l2cap_send_sframe(chan, &control);
1235 }
1236 
1237 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1238 {
1239 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1240 		return true;
1241 
1242 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1243 }
1244 
1245 static bool __amp_capable(struct l2cap_chan *chan)
1246 {
1247 	struct l2cap_conn *conn = chan->conn;
1248 	struct hci_dev *hdev;
1249 	bool amp_available = false;
1250 
1251 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1252 		return false;
1253 
1254 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1255 		return false;
1256 
1257 	read_lock(&hci_dev_list_lock);
1258 	list_for_each_entry(hdev, &hci_dev_list, list) {
1259 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1260 		    test_bit(HCI_UP, &hdev->flags)) {
1261 			amp_available = true;
1262 			break;
1263 		}
1264 	}
1265 	read_unlock(&hci_dev_list_lock);
1266 
1267 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1268 		return amp_available;
1269 
1270 	return false;
1271 }
1272 
1273 static bool l2cap_check_efs(struct l2cap_chan *chan)
1274 {
1275 	/* Check EFS parameters */
1276 	return true;
1277 }
1278 
1279 void l2cap_send_conn_req(struct l2cap_chan *chan)
1280 {
1281 	struct l2cap_conn *conn = chan->conn;
1282 	struct l2cap_conn_req req;
1283 
1284 	req.scid = cpu_to_le16(chan->scid);
1285 	req.psm  = chan->psm;
1286 
1287 	chan->ident = l2cap_get_ident(conn);
1288 
1289 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1290 
1291 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1292 }
1293 
1294 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1295 {
1296 	struct l2cap_create_chan_req req;
1297 	req.scid = cpu_to_le16(chan->scid);
1298 	req.psm  = chan->psm;
1299 	req.amp_id = amp_id;
1300 
1301 	chan->ident = l2cap_get_ident(chan->conn);
1302 
1303 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1304 		       sizeof(req), &req);
1305 }
1306 
1307 static void l2cap_move_setup(struct l2cap_chan *chan)
1308 {
1309 	struct sk_buff *skb;
1310 
1311 	BT_DBG("chan %p", chan);
1312 
1313 	if (chan->mode != L2CAP_MODE_ERTM)
1314 		return;
1315 
1316 	__clear_retrans_timer(chan);
1317 	__clear_monitor_timer(chan);
1318 	__clear_ack_timer(chan);
1319 
1320 	chan->retry_count = 0;
1321 	skb_queue_walk(&chan->tx_q, skb) {
1322 		if (bt_cb(skb)->l2cap.retries)
1323 			bt_cb(skb)->l2cap.retries = 1;
1324 		else
1325 			break;
1326 	}
1327 
1328 	chan->expected_tx_seq = chan->buffer_seq;
1329 
1330 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1331 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1332 	l2cap_seq_list_clear(&chan->retrans_list);
1333 	l2cap_seq_list_clear(&chan->srej_list);
1334 	skb_queue_purge(&chan->srej_q);
1335 
1336 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1337 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1338 
1339 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1340 }
1341 
1342 static void l2cap_move_done(struct l2cap_chan *chan)
1343 {
1344 	u8 move_role = chan->move_role;
1345 	BT_DBG("chan %p", chan);
1346 
1347 	chan->move_state = L2CAP_MOVE_STABLE;
1348 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1349 
1350 	if (chan->mode != L2CAP_MODE_ERTM)
1351 		return;
1352 
1353 	switch (move_role) {
1354 	case L2CAP_MOVE_ROLE_INITIATOR:
1355 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1356 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1357 		break;
1358 	case L2CAP_MOVE_ROLE_RESPONDER:
1359 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1360 		break;
1361 	}
1362 }
1363 
1364 static void l2cap_chan_ready(struct l2cap_chan *chan)
1365 {
1366 	/* The channel may have already been flagged as connected in
1367 	 * case of receiving data before the L2CAP info req/rsp
1368 	 * procedure is complete.
1369 	 */
1370 	if (chan->state == BT_CONNECTED)
1371 		return;
1372 
1373 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1374 	chan->conf_state = 0;
1375 	__clear_chan_timer(chan);
1376 
1377 	switch (chan->mode) {
1378 	case L2CAP_MODE_LE_FLOWCTL:
1379 	case L2CAP_MODE_EXT_FLOWCTL:
1380 		if (!chan->tx_credits)
1381 			chan->ops->suspend(chan);
1382 		break;
1383 	}
1384 
1385 	chan->state = BT_CONNECTED;
1386 
1387 	chan->ops->ready(chan);
1388 }
1389 
1390 static void l2cap_le_connect(struct l2cap_chan *chan)
1391 {
1392 	struct l2cap_conn *conn = chan->conn;
1393 	struct l2cap_le_conn_req req;
1394 
1395 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1396 		return;
1397 
1398 	if (!chan->imtu)
1399 		chan->imtu = chan->conn->mtu;
1400 
1401 	l2cap_le_flowctl_init(chan, 0);
1402 
1403 	req.psm     = chan->psm;
1404 	req.scid    = cpu_to_le16(chan->scid);
1405 	req.mtu     = cpu_to_le16(chan->imtu);
1406 	req.mps     = cpu_to_le16(chan->mps);
1407 	req.credits = cpu_to_le16(chan->rx_credits);
1408 
1409 	chan->ident = l2cap_get_ident(conn);
1410 
1411 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1412 		       sizeof(req), &req);
1413 }
1414 
1415 struct l2cap_ecred_conn_data {
1416 	struct {
1417 		struct l2cap_ecred_conn_req req;
1418 		__le16 scid[5];
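		/* room for up to 5 SCIDs in one ECRED Connection Request */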
1419 	} __packed pdu;
1420 	struct l2cap_chan *chan;
1421 	struct pid *pid;
1422 	int count;
1423 };
1424 
1425 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1426 {
1427 	struct l2cap_ecred_conn_data *conn = data;
1428 	struct pid *pid;
1429 
1430 	if (chan == conn->chan)
1431 		return;
1432 
1433 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1434 		return;
1435 
1436 	pid = chan->ops->get_peer_pid(chan);
1437 
1438 	/* Only add deferred channels with the same PID/PSM */
1439 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1440 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1441 		return;
1442 
1443 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1444 		return;
1445 
1446 	l2cap_ecred_init(chan, 0);
1447 
1448 	/* Set the same ident so we can match on the rsp */
1449 	chan->ident = conn->chan->ident;
1450 
1451 	/* Include all channels deferred */
1452 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1453 
1454 	conn->count++;
1455 }
1456 
1457 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1458 {
1459 	struct l2cap_conn *conn = chan->conn;
1460 	struct l2cap_ecred_conn_data data;
1461 
1462 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1463 		return;
1464 
1465 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1466 		return;
1467 
1468 	l2cap_ecred_init(chan, 0);
1469 
1470 	memset(&data, 0, sizeof(data));
1471 	data.pdu.req.psm     = chan->psm;
1472 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1473 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1474 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1475 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1476 
1477 	chan->ident = l2cap_get_ident(conn);
1478 	data.pid = chan->ops->get_peer_pid(chan);
1479 
1480 	data.count = 1;
1481 	data.chan = chan;
1482 	data.pid = chan->ops->get_peer_pid(chan);
1483 
1484 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1485 
1486 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1487 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1488 		       &data.pdu);
1489 }
1490 
1491 static void l2cap_le_start(struct l2cap_chan *chan)
1492 {
1493 	struct l2cap_conn *conn = chan->conn;
1494 
1495 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1496 		return;
1497 
1498 	if (!chan->psm) {
1499 		l2cap_chan_ready(chan);
1500 		return;
1501 	}
1502 
1503 	if (chan->state == BT_CONNECT) {
1504 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1505 			l2cap_ecred_connect(chan);
1506 		else
1507 			l2cap_le_connect(chan);
1508 	}
1509 }
1510 
1511 static void l2cap_start_connection(struct l2cap_chan *chan)
1512 {
1513 	if (__amp_capable(chan)) {
1514 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1515 		a2mp_discover_amp(chan);
1516 	} else if (chan->conn->hcon->type == LE_LINK) {
1517 		l2cap_le_start(chan);
1518 	} else {
1519 		l2cap_send_conn_req(chan);
1520 	}
1521 }
1522 
1523 static void l2cap_request_info(struct l2cap_conn *conn)
1524 {
1525 	struct l2cap_info_req req;
1526 
1527 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1528 		return;
1529 
1530 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1531 
1532 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1533 	conn->info_ident = l2cap_get_ident(conn);
1534 
1535 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1536 
1537 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1538 		       sizeof(req), &req);
1539 }
1540 
1541 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1542 {
1543 	/* The minimum encryption key size needs to be enforced by the
1544 	 * host stack before establishing any L2CAP connections. The
1545 	 * specification in theory allows a minimum of 1, but to align
1546 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1547 	 *
1548 	 * This check might also be called for unencrypted connections
1549 	 * that have no key size requirements. Ensure that the link is
1550 	 * actually encrypted before enforcing a key size.
1551 	 */
1552 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1553 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1554 }
1555 
1556 static void l2cap_do_start(struct l2cap_chan *chan)
1557 {
1558 	struct l2cap_conn *conn = chan->conn;
1559 
1560 	if (conn->hcon->type == LE_LINK) {
1561 		l2cap_le_start(chan);
1562 		return;
1563 	}
1564 
1565 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1566 		l2cap_request_info(conn);
1567 		return;
1568 	}
1569 
1570 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1571 		return;
1572 
1573 	if (!l2cap_chan_check_security(chan, true) ||
1574 	    !__l2cap_no_conn_pending(chan))
1575 		return;
1576 
1577 	if (l2cap_check_enc_key_size(conn->hcon))
1578 		l2cap_start_connection(chan);
1579 	else
1580 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1581 }
1582 
1583 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1584 {
1585 	u32 local_feat_mask = l2cap_feat_mask;
1586 	if (!disable_ertm)
1587 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1588 
1589 	switch (mode) {
1590 	case L2CAP_MODE_ERTM:
1591 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1592 	case L2CAP_MODE_STREAMING:
1593 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1594 	default:
1595 		return 0x00;
1596 	}
1597 }
1598 
1599 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1600 {
1601 	struct l2cap_conn *conn = chan->conn;
1602 	struct l2cap_disconn_req req;
1603 
1604 	if (!conn)
1605 		return;
1606 
1607 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1608 		__clear_retrans_timer(chan);
1609 		__clear_monitor_timer(chan);
1610 		__clear_ack_timer(chan);
1611 	}
1612 
1613 	if (chan->scid == L2CAP_CID_A2MP) {
1614 		l2cap_state_change(chan, BT_DISCONN);
1615 		return;
1616 	}
1617 
1618 	req.dcid = cpu_to_le16(chan->dcid);
1619 	req.scid = cpu_to_le16(chan->scid);
1620 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1621 		       sizeof(req), &req);
1622 
1623 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1624 }
1625 
1626 /* ---- L2CAP connections ---- */
1627 static void l2cap_conn_start(struct l2cap_conn *conn)
1628 {
1629 	struct l2cap_chan *chan, *tmp;
1630 
1631 	BT_DBG("conn %p", conn);
1632 
1633 	mutex_lock(&conn->chan_lock);
1634 
1635 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1636 		l2cap_chan_lock(chan);
1637 
1638 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1639 			l2cap_chan_ready(chan);
1640 			l2cap_chan_unlock(chan);
1641 			continue;
1642 		}
1643 
1644 		if (chan->state == BT_CONNECT) {
1645 			if (!l2cap_chan_check_security(chan, true) ||
1646 			    !__l2cap_no_conn_pending(chan)) {
1647 				l2cap_chan_unlock(chan);
1648 				continue;
1649 			}
1650 
1651 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1652 			    && test_bit(CONF_STATE2_DEVICE,
1653 					&chan->conf_state)) {
1654 				l2cap_chan_close(chan, ECONNRESET);
1655 				l2cap_chan_unlock(chan);
1656 				continue;
1657 			}
1658 
1659 			if (l2cap_check_enc_key_size(conn->hcon))
1660 				l2cap_start_connection(chan);
1661 			else
1662 				l2cap_chan_close(chan, ECONNREFUSED);
1663 
1664 		} else if (chan->state == BT_CONNECT2) {
1665 			struct l2cap_conn_rsp rsp;
1666 			char buf[128];
1667 			rsp.scid = cpu_to_le16(chan->dcid);
1668 			rsp.dcid = cpu_to_le16(chan->scid);
1669 
1670 			if (l2cap_chan_check_security(chan, false)) {
1671 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1672 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1673 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1674 					chan->ops->defer(chan);
1675 
1676 				} else {
1677 					l2cap_state_change(chan, BT_CONFIG);
1678 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1679 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1680 				}
1681 			} else {
1682 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1683 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1684 			}
1685 
1686 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1687 				       sizeof(rsp), &rsp);
1688 
1689 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1690 			    rsp.result != L2CAP_CR_SUCCESS) {
1691 				l2cap_chan_unlock(chan);
1692 				continue;
1693 			}
1694 
1695 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1696 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1697 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1698 			chan->num_conf_req++;
1699 		}
1700 
1701 		l2cap_chan_unlock(chan);
1702 	}
1703 
1704 	mutex_unlock(&conn->chan_lock);
1705 }
1706 
1707 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1708 {
1709 	struct hci_conn *hcon = conn->hcon;
1710 	struct hci_dev *hdev = hcon->hdev;
1711 
1712 	BT_DBG("%s conn %p", hdev->name, conn);
1713 
1714 	/* For outgoing pairing which doesn't necessarily have an
1715 	 * associated socket (e.g. mgmt_pair_device).
1716 	 */
1717 	if (hcon->out)
1718 		smp_conn_security(hcon, hcon->pending_sec_level);
1719 
1720 	/* For LE peripheral connections, make sure the connection interval
1721 	 * is in the range of the minimum and maximum interval that has
1722 	 * been configured for this connection. If not, then trigger
1723 	 * the connection update procedure.
1724 	 */
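	/* The interval bounds below are in units of 1.25 ms, the latency in
	 * connection events and the supervision timeout in units of 10 ms,
	 * as used by the Connection Parameter Update Request.
	 */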
1725 	if (hcon->role == HCI_ROLE_SLAVE &&
1726 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1727 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1728 		struct l2cap_conn_param_update_req req;
1729 
1730 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1731 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1732 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1733 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1734 
1735 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1736 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1737 	}
1738 }
1739 
1740 static void l2cap_conn_ready(struct l2cap_conn *conn)
1741 {
1742 	struct l2cap_chan *chan;
1743 	struct hci_conn *hcon = conn->hcon;
1744 
1745 	BT_DBG("conn %p", conn);
1746 
1747 	if (hcon->type == ACL_LINK)
1748 		l2cap_request_info(conn);
1749 
1750 	mutex_lock(&conn->chan_lock);
1751 
1752 	list_for_each_entry(chan, &conn->chan_l, list) {
1753 
1754 		l2cap_chan_lock(chan);
1755 
1756 		if (chan->scid == L2CAP_CID_A2MP) {
1757 			l2cap_chan_unlock(chan);
1758 			continue;
1759 		}
1760 
1761 		if (hcon->type == LE_LINK) {
1762 			l2cap_le_start(chan);
1763 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1764 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1765 				l2cap_chan_ready(chan);
1766 		} else if (chan->state == BT_CONNECT) {
1767 			l2cap_do_start(chan);
1768 		}
1769 
1770 		l2cap_chan_unlock(chan);
1771 	}
1772 
1773 	mutex_unlock(&conn->chan_lock);
1774 
1775 	if (hcon->type == LE_LINK)
1776 		l2cap_le_conn_ready(conn);
1777 
1778 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1779 }
1780 
1781 /* Notify sockets that we can no longer guarantee reliability */
1782 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1783 {
1784 	struct l2cap_chan *chan;
1785 
1786 	BT_DBG("conn %p", conn);
1787 
1788 	mutex_lock(&conn->chan_lock);
1789 
1790 	list_for_each_entry(chan, &conn->chan_l, list) {
1791 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1792 			l2cap_chan_set_err(chan, err);
1793 	}
1794 
1795 	mutex_unlock(&conn->chan_lock);
1796 }
1797 
1798 static void l2cap_info_timeout(struct work_struct *work)
1799 {
1800 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1801 					       info_timer.work);
1802 
1803 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1804 	conn->info_ident = 0;
1805 
1806 	l2cap_conn_start(conn);
1807 }
1808 
1809 /*
1810  * l2cap_user
1811  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1812  * callback is called during registration. The ->remove callback is called
1813  * during unregistration.
1814  * An l2cap_user object is unregistered either explicitly or implicitly when
1815  * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1816  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1817  * External modules must own a reference to the l2cap_conn object if they intend
1818  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1819  * any time if they don't.
1820  */
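
/*
 * Minimal usage sketch (illustration only; my_probe, my_remove and my_user
 * are hypothetical names, not part of this file). The registration call is
 * shown as it would appear in some module init path:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// conn->hcon/hchan stay valid until ->remove runs
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// drop any references taken on conn->hcon/hchan
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);	// caller holds a conn ref
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */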
1821 
1822 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1823 {
1824 	struct hci_dev *hdev = conn->hcon->hdev;
1825 	int ret;
1826 
1827 	/* We need to check whether l2cap_conn is registered. If it is not, we
1828 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1829 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1830 	 * relies on the parent hci_conn object to be locked. This itself relies
1831 	 * on the hci_dev object to be locked. So we must lock the hci device
1832 	 * here, too. */
1833 
1834 	hci_dev_lock(hdev);
1835 
1836 	if (!list_empty(&user->list)) {
1837 		ret = -EINVAL;
1838 		goto out_unlock;
1839 	}
1840 
1841 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1842 	if (!conn->hchan) {
1843 		ret = -ENODEV;
1844 		goto out_unlock;
1845 	}
1846 
1847 	ret = user->probe(conn, user);
1848 	if (ret)
1849 		goto out_unlock;
1850 
1851 	list_add(&user->list, &conn->users);
1852 	ret = 0;
1853 
1854 out_unlock:
1855 	hci_dev_unlock(hdev);
1856 	return ret;
1857 }
1858 EXPORT_SYMBOL(l2cap_register_user);
1859 
1860 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1861 {
1862 	struct hci_dev *hdev = conn->hcon->hdev;
1863 
1864 	hci_dev_lock(hdev);
1865 
1866 	if (list_empty(&user->list))
1867 		goto out_unlock;
1868 
1869 	list_del_init(&user->list);
1870 	user->remove(conn, user);
1871 
1872 out_unlock:
1873 	hci_dev_unlock(hdev);
1874 }
1875 EXPORT_SYMBOL(l2cap_unregister_user);
1876 
1877 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1878 {
1879 	struct l2cap_user *user;
1880 
1881 	while (!list_empty(&conn->users)) {
1882 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1883 		list_del_init(&user->list);
1884 		user->remove(conn, user);
1885 	}
1886 }
1887 
1888 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1889 {
1890 	struct l2cap_conn *conn = hcon->l2cap_data;
1891 	struct l2cap_chan *chan, *l;
1892 
1893 	if (!conn)
1894 		return;
1895 
1896 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1897 
1898 	kfree_skb(conn->rx_skb);
1899 
1900 	skb_queue_purge(&conn->pending_rx);
1901 
1902 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1903 	 * might block if we are running on a worker from the same workqueue
1904 	 * pending_rx_work is waiting on.
1905 	 */
1906 	if (work_pending(&conn->pending_rx_work))
1907 		cancel_work_sync(&conn->pending_rx_work);
1908 
1909 	if (work_pending(&conn->id_addr_update_work))
1910 		cancel_work_sync(&conn->id_addr_update_work);
1911 
1912 	l2cap_unregister_all_users(conn);
1913 
1914 	/* Force the connection to be immediately dropped */
1915 	hcon->disc_timeout = 0;
1916 
1917 	mutex_lock(&conn->chan_lock);
1918 
1919 	/* Kill channels */
1920 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1921 		l2cap_chan_hold(chan);
1922 		l2cap_chan_lock(chan);
1923 
1924 		l2cap_chan_del(chan, err);
1925 
1926 		chan->ops->close(chan);
1927 
1928 		l2cap_chan_unlock(chan);
1929 		l2cap_chan_put(chan);
1930 	}
1931 
1932 	mutex_unlock(&conn->chan_lock);
1933 
1934 	hci_chan_del(conn->hchan);
1935 
1936 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1937 		cancel_delayed_work_sync(&conn->info_timer);
1938 
1939 	hcon->l2cap_data = NULL;
1940 	conn->hchan = NULL;
1941 	l2cap_conn_put(conn);
1942 }
1943 
1944 static void l2cap_conn_free(struct kref *ref)
1945 {
1946 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1947 
1948 	hci_conn_put(conn->hcon);
1949 	kfree(conn);
1950 }
1951 
1952 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1953 {
1954 	kref_get(&conn->ref);
1955 	return conn;
1956 }
1957 EXPORT_SYMBOL(l2cap_conn_get);
1958 
1959 void l2cap_conn_put(struct l2cap_conn *conn)
1960 {
1961 	kref_put(&conn->ref, l2cap_conn_free);
1962 }
1963 EXPORT_SYMBOL(l2cap_conn_put);
1964 
1965 /* ---- Socket interface ---- */
1966 
1967 /* Find socket with psm and source / destination bdaddr.
1968  * Returns closest match.
1969  */
1970 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1971 						   bdaddr_t *src,
1972 						   bdaddr_t *dst,
1973 						   u8 link_type)
1974 {
1975 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1976 
1977 	read_lock(&chan_list_lock);
1978 
1979 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1980 		if (state && c->state != state)
1981 			continue;
1982 
1983 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1987 			continue;
1988 
1989 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1990 			int src_match, dst_match;
1991 			int src_any, dst_any;
1992 
1993 			/* Exact match. */
1994 			src_match = !bacmp(&c->src, src);
1995 			dst_match = !bacmp(&c->dst, dst);
1996 			if (src_match && dst_match) {
1997 				if (!l2cap_chan_hold_unless_zero(c))
1998 					continue;
1999 
2000 				read_unlock(&chan_list_lock);
2001 				return c;
2002 			}
2003 
2004 			/* Closest match */
2005 			src_any = !bacmp(&c->src, BDADDR_ANY);
2006 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2007 			if ((src_match && dst_any) || (src_any && dst_match) ||
2008 			    (src_any && dst_any))
2009 				c1 = c;
2010 		}
2011 	}
2012 
2013 	if (c1)
2014 		c1 = l2cap_chan_hold_unless_zero(c1);
2015 
2016 	read_unlock(&chan_list_lock);
2017 
2018 	return c1;
2019 }
2020 
2021 static void l2cap_monitor_timeout(struct work_struct *work)
2022 {
2023 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2024 					       monitor_timer.work);
2025 
2026 	BT_DBG("chan %p", chan);
2027 
2028 	l2cap_chan_lock(chan);
2029 
2030 	if (!chan->conn) {
2031 		l2cap_chan_unlock(chan);
2032 		l2cap_chan_put(chan);
2033 		return;
2034 	}
2035 
2036 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2037 
2038 	l2cap_chan_unlock(chan);
2039 	l2cap_chan_put(chan);
2040 }
2041 
2042 static void l2cap_retrans_timeout(struct work_struct *work)
2043 {
2044 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2045 					       retrans_timer.work);
2046 
2047 	BT_DBG("chan %p", chan);
2048 
2049 	l2cap_chan_lock(chan);
2050 
2051 	if (!chan->conn) {
2052 		l2cap_chan_unlock(chan);
2053 		l2cap_chan_put(chan);
2054 		return;
2055 	}
2056 
2057 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2058 	l2cap_chan_unlock(chan);
2059 	l2cap_chan_put(chan);
2060 }
2061 
2062 static void l2cap_streaming_send(struct l2cap_chan *chan,
2063 				 struct sk_buff_head *skbs)
2064 {
2065 	struct sk_buff *skb;
2066 	struct l2cap_ctrl *control;
2067 
2068 	BT_DBG("chan %p, skbs %p", chan, skbs);
2069 
2070 	if (__chan_is_moving(chan))
2071 		return;
2072 
2073 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2074 
2075 	while (!skb_queue_empty(&chan->tx_q)) {
2076 
2077 		skb = skb_dequeue(&chan->tx_q);
2078 
2079 		bt_cb(skb)->l2cap.retries = 1;
2080 		control = &bt_cb(skb)->l2cap;
2081 
2082 		control->reqseq = 0;
2083 		control->txseq = chan->next_tx_seq;
2084 
2085 		__pack_control(chan, control, skb);
2086 
2087 		if (chan->fcs == L2CAP_FCS_CRC16) {
2088 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2089 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2090 		}
2091 
2092 		l2cap_do_send(chan, skb);
2093 
2094 		BT_DBG("Sent txseq %u", control->txseq);
2095 
2096 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2097 		chan->frames_sent++;
2098 	}
2099 }
2100 
2101 static int l2cap_ertm_send(struct l2cap_chan *chan)
2102 {
2103 	struct sk_buff *skb, *tx_skb;
2104 	struct l2cap_ctrl *control;
2105 	int sent = 0;
2106 
2107 	BT_DBG("chan %p", chan);
2108 
2109 	if (chan->state != BT_CONNECTED)
2110 		return -ENOTCONN;
2111 
2112 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2113 		return 0;
2114 
2115 	if (__chan_is_moving(chan))
2116 		return 0;
2117 
2118 	while (chan->tx_send_head &&
2119 	       chan->unacked_frames < chan->remote_tx_win &&
2120 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2121 
2122 		skb = chan->tx_send_head;
2123 
2124 		bt_cb(skb)->l2cap.retries = 1;
2125 		control = &bt_cb(skb)->l2cap;
2126 
2127 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2128 			control->final = 1;
2129 
2130 		control->reqseq = chan->buffer_seq;
2131 		chan->last_acked_seq = chan->buffer_seq;
2132 		control->txseq = chan->next_tx_seq;
2133 
2134 		__pack_control(chan, control, skb);
2135 
2136 		if (chan->fcs == L2CAP_FCS_CRC16) {
2137 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2138 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2139 		}
2140 
2141 		/* Clone after data has been modified. Data is assumed to be
2142 		   read-only (for locking purposes) on cloned sk_buffs.
2143 		 */
2144 		tx_skb = skb_clone(skb, GFP_KERNEL);
2145 
2146 		if (!tx_skb)
2147 			break;
2148 
2149 		__set_retrans_timer(chan);
2150 
2151 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2152 		chan->unacked_frames++;
2153 		chan->frames_sent++;
2154 		sent++;
2155 
2156 		if (skb_queue_is_last(&chan->tx_q, skb))
2157 			chan->tx_send_head = NULL;
2158 		else
2159 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2160 
2161 		l2cap_do_send(chan, tx_skb);
2162 		BT_DBG("Sent txseq %u", control->txseq);
2163 	}
2164 
2165 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2166 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2167 
2168 	return sent;
2169 }
2170 
2171 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2172 {
2173 	struct l2cap_ctrl control;
2174 	struct sk_buff *skb;
2175 	struct sk_buff *tx_skb;
2176 	u16 seq;
2177 
2178 	BT_DBG("chan %p", chan);
2179 
2180 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2181 		return;
2182 
2183 	if (__chan_is_moving(chan))
2184 		return;
2185 
2186 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2187 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2188 
2189 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2190 		if (!skb) {
2191 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2192 			       seq);
2193 			continue;
2194 		}
2195 
2196 		bt_cb(skb)->l2cap.retries++;
2197 		control = bt_cb(skb)->l2cap;
2198 
2199 		if (chan->max_tx != 0 &&
2200 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2201 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2202 			l2cap_send_disconn_req(chan, ECONNRESET);
2203 			l2cap_seq_list_clear(&chan->retrans_list);
2204 			break;
2205 		}
2206 
2207 		control.reqseq = chan->buffer_seq;
2208 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2209 			control.final = 1;
2210 		else
2211 			control.final = 0;
2212 
2213 		if (skb_cloned(skb)) {
2214 			/* Cloned sk_buffs are read-only, so we need a
2215 			 * writeable copy
2216 			 */
2217 			tx_skb = skb_copy(skb, GFP_KERNEL);
2218 		} else {
2219 			tx_skb = skb_clone(skb, GFP_KERNEL);
2220 		}
2221 
2222 		if (!tx_skb) {
2223 			l2cap_seq_list_clear(&chan->retrans_list);
2224 			break;
2225 		}
2226 
2227 		/* Update skb contents */
2228 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2229 			put_unaligned_le32(__pack_extended_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		} else {
2232 			put_unaligned_le16(__pack_enhanced_control(&control),
2233 					   tx_skb->data + L2CAP_HDR_SIZE);
2234 		}
2235 
2236 		/* Update FCS */
2237 		if (chan->fcs == L2CAP_FCS_CRC16) {
2238 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2239 					tx_skb->len - L2CAP_FCS_SIZE);
2240 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2241 						L2CAP_FCS_SIZE);
2242 		}
2243 
2244 		l2cap_do_send(chan, tx_skb);
2245 
2246 		BT_DBG("Resent txseq %d", control.txseq);
2247 
2248 		chan->last_acked_seq = chan->buffer_seq;
2249 	}
2250 }
2251 
2252 static void l2cap_retransmit(struct l2cap_chan *chan,
2253 			     struct l2cap_ctrl *control)
2254 {
2255 	BT_DBG("chan %p, control %p", chan, control);
2256 
2257 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2258 	l2cap_ertm_resend(chan);
2259 }
2260 
2261 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2262 				 struct l2cap_ctrl *control)
2263 {
2264 	struct sk_buff *skb;
2265 
2266 	BT_DBG("chan %p, control %p", chan, control);
2267 
2268 	if (control->poll)
2269 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2270 
2271 	l2cap_seq_list_clear(&chan->retrans_list);
2272 
2273 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2274 		return;
2275 
2276 	if (chan->unacked_frames) {
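		/* Walk to the first unacked frame (reqseq) and queue every
		 * txseq from there up to, but not including, tx_send_head for
		 * retransmission.
		 */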
2277 		skb_queue_walk(&chan->tx_q, skb) {
2278 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2279 			    skb == chan->tx_send_head)
2280 				break;
2281 		}
2282 
2283 		skb_queue_walk_from(&chan->tx_q, skb) {
2284 			if (skb == chan->tx_send_head)
2285 				break;
2286 
2287 			l2cap_seq_list_append(&chan->retrans_list,
2288 					      bt_cb(skb)->l2cap.txseq);
2289 		}
2290 
2291 		l2cap_ertm_resend(chan);
2292 	}
2293 }
2294 
2295 static void l2cap_send_ack(struct l2cap_chan *chan)
2296 {
2297 	struct l2cap_ctrl control;
2298 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2299 					 chan->last_acked_seq);
2300 	int threshold;
2301 
2302 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2303 	       chan, chan->last_acked_seq, chan->buffer_seq);
2304 
2305 	memset(&control, 0, sizeof(control));
2306 	control.sframe = 1;
2307 
2308 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2309 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2310 		__clear_ack_timer(chan);
2311 		control.super = L2CAP_SUPER_RNR;
2312 		control.reqseq = chan->buffer_seq;
2313 		l2cap_send_sframe(chan, &control);
2314 	} else {
2315 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2316 			l2cap_ertm_send(chan);
2317 			/* If any i-frames were sent, they included an ack */
2318 			if (chan->buffer_seq == chan->last_acked_seq)
2319 				frames_to_ack = 0;
2320 		}
2321 
2322 		/* Ack now if the window is 3/4ths full.
2323 		 * Calculate without mul or div
2324 		 */
2325 		threshold = chan->ack_win;
2326 		threshold += threshold << 1;
2327 		threshold >>= 2;
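		/* i.e. threshold = (3 * chan->ack_win) / 4: the add-and-shift
		 * above multiplies by 3 and the right shift divides by 4.
		 */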
2328 
2329 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2330 		       threshold);
2331 
2332 		if (frames_to_ack >= threshold) {
2333 			__clear_ack_timer(chan);
2334 			control.super = L2CAP_SUPER_RR;
2335 			control.reqseq = chan->buffer_seq;
2336 			l2cap_send_sframe(chan, &control);
2337 			frames_to_ack = 0;
2338 		}
2339 
2340 		if (frames_to_ack)
2341 			__set_ack_timer(chan);
2342 	}
2343 }
2344 
2345 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2346 					 struct msghdr *msg, int len,
2347 					 int count, struct sk_buff *skb)
2348 {
2349 	struct l2cap_conn *conn = chan->conn;
2350 	struct sk_buff **frag;
2351 	int sent = 0;
2352 
2353 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2354 		return -EFAULT;
2355 
2356 	sent += count;
2357 	len  -= count;
2358 
2359 	/* Continuation fragments (no L2CAP header) */
2360 	frag = &skb_shinfo(skb)->frag_list;
2361 	while (len) {
2362 		struct sk_buff *tmp;
2363 
2364 		count = min_t(unsigned int, conn->mtu, len);
2365 
2366 		tmp = chan->ops->alloc_skb(chan, 0, count,
2367 					   msg->msg_flags & MSG_DONTWAIT);
2368 		if (IS_ERR(tmp))
2369 			return PTR_ERR(tmp);
2370 
2371 		*frag = tmp;
2372 
2373 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2374 				   &msg->msg_iter))
2375 			return -EFAULT;
2376 
2377 		sent += count;
2378 		len  -= count;
2379 
2380 		skb->len += (*frag)->len;
2381 		skb->data_len += (*frag)->len;
2382 
2383 		frag = &(*frag)->next;
2384 	}
2385 
2386 	return sent;
2387 }
2388 
2389 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2390 						 struct msghdr *msg, size_t len)
2391 {
2392 	struct l2cap_conn *conn = chan->conn;
2393 	struct sk_buff *skb;
2394 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2395 	struct l2cap_hdr *lh;
2396 
2397 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2398 	       __le16_to_cpu(chan->psm), len);
2399 
2400 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2401 
2402 	skb = chan->ops->alloc_skb(chan, hlen, count,
2403 				   msg->msg_flags & MSG_DONTWAIT);
2404 	if (IS_ERR(skb))
2405 		return skb;
2406 
2407 	/* Create L2CAP header */
2408 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 	lh->cid = cpu_to_le16(chan->dcid);
2410 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2411 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2422 					      struct msghdr *msg, size_t len)
2423 {
2424 	struct l2cap_conn *conn = chan->conn;
2425 	struct sk_buff *skb;
2426 	int err, count;
2427 	struct l2cap_hdr *lh;
2428 
2429 	BT_DBG("chan %p len %zu", chan, len);
2430 
2431 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2432 
2433 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2434 				   msg->msg_flags & MSG_DONTWAIT);
2435 	if (IS_ERR(skb))
2436 		return skb;
2437 
2438 	/* Create L2CAP header */
2439 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2440 	lh->cid = cpu_to_le16(chan->dcid);
2441 	lh->len = cpu_to_le16(len);
2442 
2443 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 	if (unlikely(err < 0)) {
2445 		kfree_skb(skb);
2446 		return ERR_PTR(err);
2447 	}
2448 	return skb;
2449 }
2450 
2451 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2452 					       struct msghdr *msg, size_t len,
2453 					       u16 sdulen)
2454 {
2455 	struct l2cap_conn *conn = chan->conn;
2456 	struct sk_buff *skb;
2457 	int err, count, hlen;
2458 	struct l2cap_hdr *lh;
2459 
2460 	BT_DBG("chan %p len %zu", chan, len);
2461 
2462 	if (!conn)
2463 		return ERR_PTR(-ENOTCONN);
2464 
2465 	hlen = __ertm_hdr_size(chan);
2466 
2467 	if (sdulen)
2468 		hlen += L2CAP_SDULEN_SIZE;
2469 
2470 	if (chan->fcs == L2CAP_FCS_CRC16)
2471 		hlen += L2CAP_FCS_SIZE;
2472 
2473 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2474 
2475 	skb = chan->ops->alloc_skb(chan, hlen, count,
2476 				   msg->msg_flags & MSG_DONTWAIT);
2477 	if (IS_ERR(skb))
2478 		return skb;
2479 
2480 	/* Create L2CAP header */
2481 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2482 	lh->cid = cpu_to_le16(chan->dcid);
2483 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2484 
2485 	/* Control header is populated later */
2486 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2487 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2488 	else
2489 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2490 
2491 	if (sdulen)
2492 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2493 
2494 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2495 	if (unlikely(err < 0)) {
2496 		kfree_skb(skb);
2497 		return ERR_PTR(err);
2498 	}
2499 
2500 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2501 	bt_cb(skb)->l2cap.retries = 0;
2502 	return skb;
2503 }
2504 
2505 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2506 			     struct sk_buff_head *seg_queue,
2507 			     struct msghdr *msg, size_t len)
2508 {
2509 	struct sk_buff *skb;
2510 	u16 sdu_len;
2511 	size_t pdu_len;
2512 	u8 sar;
2513 
2514 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2515 
2516 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2517 	 * so fragmented skbs are not used.  The HCI layer's handling
2518 	 * of fragmented skbs is not compatible with ERTM's queueing.
2519 	 */
2520 
2521 	/* PDU size is derived from the HCI MTU */
2522 	pdu_len = chan->conn->mtu;
2523 
2524 	/* Constrain PDU size for BR/EDR connections */
2525 	if (!chan->hs_hcon)
2526 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2527 
2528 	/* Adjust for largest possible L2CAP overhead. */
2529 	if (chan->fcs)
2530 		pdu_len -= L2CAP_FCS_SIZE;
2531 
2532 	pdu_len -= __ertm_hdr_size(chan);
2533 
2534 	/* Remote device may have requested smaller PDUs */
2535 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
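	/* pdu_len is now the per-segment payload limit: the smaller of what
	 * fits in one HCI fragment (after the ERTM header and optional FCS)
	 * and the remote's MPS.
	 */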
2536 
2537 	if (len <= pdu_len) {
2538 		sar = L2CAP_SAR_UNSEGMENTED;
2539 		sdu_len = 0;
2540 		pdu_len = len;
2541 	} else {
2542 		sar = L2CAP_SAR_START;
2543 		sdu_len = len;
2544 	}
2545 
2546 	while (len > 0) {
2547 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2548 
2549 		if (IS_ERR(skb)) {
2550 			__skb_queue_purge(seg_queue);
2551 			return PTR_ERR(skb);
2552 		}
2553 
2554 		bt_cb(skb)->l2cap.sar = sar;
2555 		__skb_queue_tail(seg_queue, skb);
2556 
2557 		len -= pdu_len;
2558 		if (sdu_len)
2559 			sdu_len = 0;
2560 
2561 		if (len <= pdu_len) {
2562 			sar = L2CAP_SAR_END;
2563 			pdu_len = len;
2564 		} else {
2565 			sar = L2CAP_SAR_CONTINUE;
2566 		}
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2573 						   struct msghdr *msg,
2574 						   size_t len, u16 sdulen)
2575 {
2576 	struct l2cap_conn *conn = chan->conn;
2577 	struct sk_buff *skb;
2578 	int err, count, hlen;
2579 	struct l2cap_hdr *lh;
2580 
2581 	BT_DBG("chan %p len %zu", chan, len);
2582 
2583 	if (!conn)
2584 		return ERR_PTR(-ENOTCONN);
2585 
2586 	hlen = L2CAP_HDR_SIZE;
2587 
2588 	if (sdulen)
2589 		hlen += L2CAP_SDULEN_SIZE;
2590 
2591 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2592 
2593 	skb = chan->ops->alloc_skb(chan, hlen, count,
2594 				   msg->msg_flags & MSG_DONTWAIT);
2595 	if (IS_ERR(skb))
2596 		return skb;
2597 
2598 	/* Create L2CAP header */
2599 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2600 	lh->cid = cpu_to_le16(chan->dcid);
2601 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2602 
2603 	if (sdulen)
2604 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2605 
2606 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2607 	if (unlikely(err < 0)) {
2608 		kfree_skb(skb);
2609 		return ERR_PTR(err);
2610 	}
2611 
2612 	return skb;
2613 }
2614 
2615 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2616 				struct sk_buff_head *seg_queue,
2617 				struct msghdr *msg, size_t len)
2618 {
2619 	struct sk_buff *skb;
2620 	size_t pdu_len;
2621 	u16 sdu_len;
2622 
2623 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2624 
2625 	sdu_len = len;
2626 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2627 
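	/* Illustrative example: with remote_mps == 50, a 100-byte SDU becomes
	 * three PDUs: 2-byte SDU length plus 48 data bytes, then 50 data
	 * bytes, then the remaining 2, since pdu_len regains L2CAP_SDULEN_SIZE
	 * once the first segment has been queued.
	 */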
2628 	while (len > 0) {
2629 		if (len <= pdu_len)
2630 			pdu_len = len;
2631 
2632 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2633 		if (IS_ERR(skb)) {
2634 			__skb_queue_purge(seg_queue);
2635 			return PTR_ERR(skb);
2636 		}
2637 
2638 		__skb_queue_tail(seg_queue, skb);
2639 
2640 		len -= pdu_len;
2641 
2642 		if (sdu_len) {
2643 			sdu_len = 0;
2644 			pdu_len += L2CAP_SDULEN_SIZE;
2645 		}
2646 	}
2647 
2648 	return 0;
2649 }
2650 
2651 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2652 {
2653 	int sent = 0;
2654 
2655 	BT_DBG("chan %p", chan);
2656 
2657 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2658 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2659 		chan->tx_credits--;
2660 		sent++;
2661 	}
2662 
2663 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2664 	       skb_queue_len(&chan->tx_q));
2665 }
2666 
2667 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2668 {
2669 	struct sk_buff *skb;
2670 	int err;
2671 	struct sk_buff_head seg_queue;
2672 
2673 	if (!chan->conn)
2674 		return -ENOTCONN;
2675 
2676 	/* Connectionless channel */
2677 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2678 		skb = l2cap_create_connless_pdu(chan, msg, len);
2679 		if (IS_ERR(skb))
2680 			return PTR_ERR(skb);
2681 
2682 		l2cap_do_send(chan, skb);
2683 		return len;
2684 	}
2685 
2686 	switch (chan->mode) {
2687 	case L2CAP_MODE_LE_FLOWCTL:
2688 	case L2CAP_MODE_EXT_FLOWCTL:
2689 		/* Check outgoing MTU */
2690 		if (len > chan->omtu)
2691 			return -EMSGSIZE;
2692 
2693 		__skb_queue_head_init(&seg_queue);
2694 
2695 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2696 
2697 		if (chan->state != BT_CONNECTED) {
2698 			__skb_queue_purge(&seg_queue);
2699 			err = -ENOTCONN;
2700 		}
2701 
2702 		if (err)
2703 			return err;
2704 
2705 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2706 
2707 		l2cap_le_flowctl_send(chan);
2708 
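		/* No credits left: ask the channel user to stop sending until
		 * the remote returns LE flow control credits.
		 */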
2709 		if (!chan->tx_credits)
2710 			chan->ops->suspend(chan);
2711 
2712 		err = len;
2713 
2714 		break;
2715 
2716 	case L2CAP_MODE_BASIC:
2717 		/* Check outgoing MTU */
2718 		if (len > chan->omtu)
2719 			return -EMSGSIZE;
2720 
2721 		/* Create a basic PDU */
2722 		skb = l2cap_create_basic_pdu(chan, msg, len);
2723 		if (IS_ERR(skb))
2724 			return PTR_ERR(skb);
2725 
2726 		l2cap_do_send(chan, skb);
2727 		err = len;
2728 		break;
2729 
2730 	case L2CAP_MODE_ERTM:
2731 	case L2CAP_MODE_STREAMING:
2732 		/* Check outgoing MTU */
2733 		if (len > chan->omtu) {
2734 			err = -EMSGSIZE;
2735 			break;
2736 		}
2737 
2738 		__skb_queue_head_init(&seg_queue);
2739 
2740 		/* Do segmentation before calling in to the state machine,
2741 		 * since it's possible to block while waiting for memory
2742 		 * allocation.
2743 		 */
2744 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2745 
2746 		if (err)
2747 			break;
2748 
2749 		if (chan->mode == L2CAP_MODE_ERTM)
2750 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2751 		else
2752 			l2cap_streaming_send(chan, &seg_queue);
2753 
2754 		err = len;
2755 
2756 		/* If the skbs were not queued for sending, they'll still be in
2757 		 * seg_queue and need to be purged.
2758 		 */
2759 		__skb_queue_purge(&seg_queue);
2760 		break;
2761 
2762 	default:
2763 		BT_DBG("bad mode %1.1x", chan->mode);
2764 		err = -EBADFD;
2765 	}
2766 
2767 	return err;
2768 }
2769 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2770 
2771 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2772 {
2773 	struct l2cap_ctrl control;
2774 	u16 seq;
2775 
2776 	BT_DBG("chan %p, txseq %u", chan, txseq);
2777 
2778 	memset(&control, 0, sizeof(control));
2779 	control.sframe = 1;
2780 	control.super = L2CAP_SUPER_SREJ;
2781 
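	/* Request retransmission of every frame between the expected txseq and
	 * the one just received that is not already buffered in srej_q.
	 */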
2782 	for (seq = chan->expected_tx_seq; seq != txseq;
2783 	     seq = __next_seq(chan, seq)) {
2784 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2785 			control.reqseq = seq;
2786 			l2cap_send_sframe(chan, &control);
2787 			l2cap_seq_list_append(&chan->srej_list, seq);
2788 		}
2789 	}
2790 
2791 	chan->expected_tx_seq = __next_seq(chan, txseq);
2792 }
2793 
2794 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2795 {
2796 	struct l2cap_ctrl control;
2797 
2798 	BT_DBG("chan %p", chan);
2799 
2800 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2801 		return;
2802 
2803 	memset(&control, 0, sizeof(control));
2804 	control.sframe = 1;
2805 	control.super = L2CAP_SUPER_SREJ;
2806 	control.reqseq = chan->srej_list.tail;
2807 	l2cap_send_sframe(chan, &control);
2808 }
2809 
2810 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2811 {
2812 	struct l2cap_ctrl control;
2813 	u16 initial_head;
2814 	u16 seq;
2815 
2816 	BT_DBG("chan %p, txseq %u", chan, txseq);
2817 
2818 	memset(&control, 0, sizeof(control));
2819 	control.sframe = 1;
2820 	control.super = L2CAP_SUPER_SREJ;
2821 
2822 	/* Capture initial list head to allow only one pass through the list. */
2823 	initial_head = chan->srej_list.head;
2824 
2825 	do {
2826 		seq = l2cap_seq_list_pop(&chan->srej_list);
2827 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2828 			break;
2829 
2830 		control.reqseq = seq;
2831 		l2cap_send_sframe(chan, &control);
2832 		l2cap_seq_list_append(&chan->srej_list, seq);
2833 	} while (chan->srej_list.head != initial_head);
2834 }
2835 
2836 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2837 {
2838 	struct sk_buff *acked_skb;
2839 	u16 ackseq;
2840 
2841 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2842 
2843 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2844 		return;
2845 
2846 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2847 	       chan->expected_ack_seq, chan->unacked_frames);
2848 
2849 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2850 	     ackseq = __next_seq(chan, ackseq)) {
2851 
2852 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2853 		if (acked_skb) {
2854 			skb_unlink(acked_skb, &chan->tx_q);
2855 			kfree_skb(acked_skb);
2856 			chan->unacked_frames--;
2857 		}
2858 	}
2859 
2860 	chan->expected_ack_seq = reqseq;
2861 
2862 	if (chan->unacked_frames == 0)
2863 		__clear_retrans_timer(chan);
2864 
2865 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2866 }
2867 
2868 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2869 {
2870 	BT_DBG("chan %p", chan);
2871 
2872 	chan->expected_tx_seq = chan->buffer_seq;
2873 	l2cap_seq_list_clear(&chan->srej_list);
2874 	skb_queue_purge(&chan->srej_q);
2875 	chan->rx_state = L2CAP_RX_STATE_RECV;
2876 }
2877 
2878 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2879 				struct l2cap_ctrl *control,
2880 				struct sk_buff_head *skbs, u8 event)
2881 {
2882 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2883 	       event);
2884 
2885 	switch (event) {
2886 	case L2CAP_EV_DATA_REQUEST:
2887 		if (chan->tx_send_head == NULL)
2888 			chan->tx_send_head = skb_peek(skbs);
2889 
2890 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2891 		l2cap_ertm_send(chan);
2892 		break;
2893 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2894 		BT_DBG("Enter LOCAL_BUSY");
2895 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2896 
2897 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2898 			/* The SREJ_SENT state must be aborted if we are to
2899 			 * enter the LOCAL_BUSY state.
2900 			 */
2901 			l2cap_abort_rx_srej_sent(chan);
2902 		}
2903 
2904 		l2cap_send_ack(chan);
2905 
2906 		break;
2907 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2908 		BT_DBG("Exit LOCAL_BUSY");
2909 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2910 
2911 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2912 			struct l2cap_ctrl local_control;
2913 
2914 			memset(&local_control, 0, sizeof(local_control));
2915 			local_control.sframe = 1;
2916 			local_control.super = L2CAP_SUPER_RR;
2917 			local_control.poll = 1;
2918 			local_control.reqseq = chan->buffer_seq;
2919 			l2cap_send_sframe(chan, &local_control);
2920 
2921 			chan->retry_count = 1;
2922 			__set_monitor_timer(chan);
2923 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 		}
2925 		break;
2926 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2927 		l2cap_process_reqseq(chan, control->reqseq);
2928 		break;
2929 	case L2CAP_EV_EXPLICIT_POLL:
2930 		l2cap_send_rr_or_rnr(chan, 1);
2931 		chan->retry_count = 1;
2932 		__set_monitor_timer(chan);
2933 		__clear_ack_timer(chan);
2934 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2935 		break;
2936 	case L2CAP_EV_RETRANS_TO:
2937 		l2cap_send_rr_or_rnr(chan, 1);
2938 		chan->retry_count = 1;
2939 		__set_monitor_timer(chan);
2940 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2941 		break;
2942 	case L2CAP_EV_RECV_FBIT:
2943 		/* Nothing to process */
2944 		break;
2945 	default:
2946 		break;
2947 	}
2948 }
2949 
2950 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2951 				  struct l2cap_ctrl *control,
2952 				  struct sk_buff_head *skbs, u8 event)
2953 {
2954 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2955 	       event);
2956 
2957 	switch (event) {
2958 	case L2CAP_EV_DATA_REQUEST:
2959 		if (chan->tx_send_head == NULL)
2960 			chan->tx_send_head = skb_peek(skbs);
2961 		/* Queue data, but don't send. */
2962 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2963 		break;
2964 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2965 		BT_DBG("Enter LOCAL_BUSY");
2966 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2967 
2968 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2969 			/* The SREJ_SENT state must be aborted if we are to
2970 			 * enter the LOCAL_BUSY state.
2971 			 */
2972 			l2cap_abort_rx_srej_sent(chan);
2973 		}
2974 
2975 		l2cap_send_ack(chan);
2976 
2977 		break;
2978 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2979 		BT_DBG("Exit LOCAL_BUSY");
2980 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2981 
2982 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2983 			struct l2cap_ctrl local_control;
2984 			memset(&local_control, 0, sizeof(local_control));
2985 			local_control.sframe = 1;
2986 			local_control.super = L2CAP_SUPER_RR;
2987 			local_control.poll = 1;
2988 			local_control.reqseq = chan->buffer_seq;
2989 			l2cap_send_sframe(chan, &local_control);
2990 
2991 			chan->retry_count = 1;
2992 			__set_monitor_timer(chan);
2993 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2994 		}
2995 		break;
2996 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2997 		l2cap_process_reqseq(chan, control->reqseq);
2998 		fallthrough;
2999 
3000 	case L2CAP_EV_RECV_FBIT:
3001 		if (control && control->final) {
3002 			__clear_monitor_timer(chan);
3003 			if (chan->unacked_frames > 0)
3004 				__set_retrans_timer(chan);
3005 			chan->retry_count = 0;
3006 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3007 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3008 		}
3009 		break;
3010 	case L2CAP_EV_EXPLICIT_POLL:
3011 		/* Ignore */
3012 		break;
3013 	case L2CAP_EV_MONITOR_TO:
3014 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3015 			l2cap_send_rr_or_rnr(chan, 1);
3016 			__set_monitor_timer(chan);
3017 			chan->retry_count++;
3018 		} else {
3019 			l2cap_send_disconn_req(chan, ECONNABORTED);
3020 		}
3021 		break;
3022 	default:
3023 		break;
3024 	}
3025 }
3026 
3027 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3028 		     struct sk_buff_head *skbs, u8 event)
3029 {
3030 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3031 	       chan, control, skbs, event, chan->tx_state);
3032 
3033 	switch (chan->tx_state) {
3034 	case L2CAP_TX_STATE_XMIT:
3035 		l2cap_tx_state_xmit(chan, control, skbs, event);
3036 		break;
3037 	case L2CAP_TX_STATE_WAIT_F:
3038 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3039 		break;
3040 	default:
3041 		/* Ignore event */
3042 		break;
3043 	}
3044 }
3045 
3046 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3047 			     struct l2cap_ctrl *control)
3048 {
3049 	BT_DBG("chan %p, control %p", chan, control);
3050 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3051 }
3052 
3053 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3054 				  struct l2cap_ctrl *control)
3055 {
3056 	BT_DBG("chan %p, control %p", chan, control);
3057 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3058 }
3059 
3060 /* Copy frame to all raw sockets on that connection */
3061 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3062 {
3063 	struct sk_buff *nskb;
3064 	struct l2cap_chan *chan;
3065 
3066 	BT_DBG("conn %p", conn);
3067 
3068 	mutex_lock(&conn->chan_lock);
3069 
3070 	list_for_each_entry(chan, &conn->chan_l, list) {
3071 		if (chan->chan_type != L2CAP_CHAN_RAW)
3072 			continue;
3073 
3074 		/* Don't send frame to the channel it came from */
3075 		if (bt_cb(skb)->l2cap.chan == chan)
3076 			continue;
3077 
3078 		nskb = skb_clone(skb, GFP_KERNEL);
3079 		if (!nskb)
3080 			continue;
3081 		if (chan->ops->recv(chan, nskb))
3082 			kfree_skb(nskb);
3083 	}
3084 
3085 	mutex_unlock(&conn->chan_lock);
3086 }
3087 
3088 /* ---- L2CAP signalling commands ---- */
3089 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3090 				       u8 ident, u16 dlen, void *data)
3091 {
3092 	struct sk_buff *skb, **frag;
3093 	struct l2cap_cmd_hdr *cmd;
3094 	struct l2cap_hdr *lh;
3095 	int len, count;
3096 
3097 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3098 	       conn, code, ident, dlen);
3099 
3100 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3101 		return NULL;
3102 
3103 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3104 	count = min_t(unsigned int, conn->mtu, len);
3105 
3106 	skb = bt_skb_alloc(count, GFP_KERNEL);
3107 	if (!skb)
3108 		return NULL;
3109 
3110 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3111 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3112 
3113 	if (conn->hcon->type == LE_LINK)
3114 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3115 	else
3116 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3117 
3118 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3119 	cmd->code  = code;
3120 	cmd->ident = ident;
3121 	cmd->len   = cpu_to_le16(dlen);
3122 
3123 	if (dlen) {
3124 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3125 		skb_put_data(skb, data, count);
3126 		data += count;
3127 	}
3128 
3129 	len -= skb->len;
3130 
3131 	/* Continuation fragments (no L2CAP header) */
3132 	frag = &skb_shinfo(skb)->frag_list;
3133 	while (len) {
3134 		count = min_t(unsigned int, conn->mtu, len);
3135 
3136 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3137 		if (!*frag)
3138 			goto fail;
3139 
3140 		skb_put_data(*frag, data, count);
3141 
3142 		len  -= count;
3143 		data += count;
3144 
3145 		frag = &(*frag)->next;
3146 	}
3147 
3148 	return skb;
3149 
3150 fail:
3151 	kfree_skb(skb);
3152 	return NULL;
3153 }
3154 
3155 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3156 				     unsigned long *val)
3157 {
3158 	struct l2cap_conf_opt *opt = *ptr;
3159 	int len;
3160 
3161 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3162 	*ptr += len;
3163 
3164 	*type = opt->type;
3165 	*olen = opt->len;
3166 
3167 	switch (opt->len) {
3168 	case 1:
3169 		*val = *((u8 *) opt->val);
3170 		break;
3171 
3172 	case 2:
3173 		*val = get_unaligned_le16(opt->val);
3174 		break;
3175 
3176 	case 4:
3177 		*val = get_unaligned_le32(opt->val);
3178 		break;
3179 
3180 	default:
3181 		*val = (unsigned long) opt->val;
3182 		break;
3183 	}
3184 
3185 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3186 	return len;
3187 }
3188 
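/* Configuration options are type/length/value triplets: a 1-byte type, a
 * 1-byte length and len bytes of little-endian value. For example, an MTU
 * option of 672 bytes is encoded as 01 02 a0 02 (type L2CAP_CONF_MTU,
 * length 2, value 0x02a0).
 */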
3189 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3190 {
3191 	struct l2cap_conf_opt *opt = *ptr;
3192 
3193 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3194 
3195 	if (size < L2CAP_CONF_OPT_SIZE + len)
3196 		return;
3197 
3198 	opt->type = type;
3199 	opt->len  = len;
3200 
3201 	switch (len) {
3202 	case 1:
3203 		*((u8 *) opt->val)  = val;
3204 		break;
3205 
3206 	case 2:
3207 		put_unaligned_le16(val, opt->val);
3208 		break;
3209 
3210 	case 4:
3211 		put_unaligned_le32(val, opt->val);
3212 		break;
3213 
3214 	default:
3215 		memcpy(opt->val, (void *) val, len);
3216 		break;
3217 	}
3218 
3219 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3220 }
3221 
3222 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3223 {
3224 	struct l2cap_conf_efs efs;
3225 
3226 	switch (chan->mode) {
3227 	case L2CAP_MODE_ERTM:
3228 		efs.id		= chan->local_id;
3229 		efs.stype	= chan->local_stype;
3230 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3231 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3232 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3233 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3234 		break;
3235 
3236 	case L2CAP_MODE_STREAMING:
3237 		efs.id		= 1;
3238 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3239 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3240 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3241 		efs.acc_lat	= 0;
3242 		efs.flush_to	= 0;
3243 		break;
3244 
3245 	default:
3246 		return;
3247 	}
3248 
3249 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3250 			   (unsigned long) &efs, size);
3251 }
3252 
3253 static void l2cap_ack_timeout(struct work_struct *work)
3254 {
3255 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3256 					       ack_timer.work);
3257 	u16 frames_to_ack;
3258 
3259 	BT_DBG("chan %p", chan);
3260 
3261 	l2cap_chan_lock(chan);
3262 
3263 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3264 				     chan->last_acked_seq);
3265 
3266 	if (frames_to_ack)
3267 		l2cap_send_rr_or_rnr(chan, 0);
3268 
3269 	l2cap_chan_unlock(chan);
3270 	l2cap_chan_put(chan);
3271 }
3272 
3273 int l2cap_ertm_init(struct l2cap_chan *chan)
3274 {
3275 	int err;
3276 
3277 	chan->next_tx_seq = 0;
3278 	chan->expected_tx_seq = 0;
3279 	chan->expected_ack_seq = 0;
3280 	chan->unacked_frames = 0;
3281 	chan->buffer_seq = 0;
3282 	chan->frames_sent = 0;
3283 	chan->last_acked_seq = 0;
3284 	chan->sdu = NULL;
3285 	chan->sdu_last_frag = NULL;
3286 	chan->sdu_len = 0;
3287 
3288 	skb_queue_head_init(&chan->tx_q);
3289 
3290 	chan->local_amp_id = AMP_ID_BREDR;
3291 	chan->move_id = AMP_ID_BREDR;
3292 	chan->move_state = L2CAP_MOVE_STABLE;
3293 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3294 
3295 	if (chan->mode != L2CAP_MODE_ERTM)
3296 		return 0;
3297 
3298 	chan->rx_state = L2CAP_RX_STATE_RECV;
3299 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3300 
3301 	skb_queue_head_init(&chan->srej_q);
3302 
3303 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3304 	if (err < 0)
3305 		return err;
3306 
3307 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3308 	if (err < 0)
3309 		l2cap_seq_list_free(&chan->srej_list);
3310 
3311 	return err;
3312 }
3313 
3314 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3315 {
3316 	switch (mode) {
3317 	case L2CAP_MODE_STREAMING:
3318 	case L2CAP_MODE_ERTM:
3319 		if (l2cap_mode_supported(mode, remote_feat_mask))
3320 			return mode;
3321 		fallthrough;
3322 	default:
3323 		return L2CAP_MODE_BASIC;
3324 	}
3325 }
3326 
3327 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3328 {
3329 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3330 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3331 }
3332 
3333 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3334 {
3335 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3336 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3337 }
3338 
3339 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3340 				      struct l2cap_conf_rfc *rfc)
3341 {
3342 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3343 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3344 
3345 		/* Class 1 devices must have ERTM timeouts
3346 		 * exceeding the Link Supervision Timeout.  The
3347 		 * default Link Supervision Timeout for AMP
3348 		 * controllers is 10 seconds.
3349 		 *
3350 		 * Class 1 devices use 0xffffffff for their
3351 		 * best-effort flush timeout, so the clamping logic
3352 		 * will result in a timeout that meets the above
3353 		 * requirement.  ERTM timeouts are 16-bit values, so
3354 		 * the maximum timeout is 65.535 seconds.
3355 		 */
3356 
3357 		/* Convert timeout to milliseconds and round */
3358 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3359 
3360 		/* This is the recommended formula for Class 2 devices
3361 		 * that start ERTM timers when packets are sent to the
3362 		 * controller.
3363 		 */
3364 		ertm_to = 3 * ertm_to + 500;
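		/* Worked example: a 100 ms best-effort flush timeout yields
		 * 3 * 100 + 500 = 800 ms; the Class 1 value of 0xffffffff us
		 * rounds up to 4294968 ms and is clamped to 0xffff (65535 ms)
		 * below.
		 */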
3365 
3366 		if (ertm_to > 0xffff)
3367 			ertm_to = 0xffff;
3368 
3369 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3370 		rfc->monitor_timeout = rfc->retrans_timeout;
3371 	} else {
3372 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3373 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3374 	}
3375 }
3376 
3377 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3378 {
3379 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3380 	    __l2cap_ews_supported(chan->conn)) {
3381 		/* use extended control field */
3382 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3383 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3384 	} else {
3385 		chan->tx_win = min_t(u16, chan->tx_win,
3386 				     L2CAP_DEFAULT_TX_WINDOW);
3387 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3388 	}
3389 	chan->ack_win = chan->tx_win;
3390 }
3391 
3392 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3393 {
3394 	struct hci_conn *conn = chan->conn->hcon;
3395 
3396 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3397 
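	/* Pick the largest usable EDR packet type and derive the MTU from its
	 * maximum information payload minus the 2-byte payload header (e.g.
	 * 3-DH5: 1023 - 2 = 1021). The EDR bits in pkt_type are exclusion
	 * flags, so a clear bit is assumed to mean the type may be used.
	 */
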
3398 	/* The 2-DH1 packet has between 2 and 56 information bytes
3399 	 * (including the 2-byte payload header)
3400 	 */
3401 	if (!(conn->pkt_type & HCI_2DH1))
3402 		chan->imtu = 54;
3403 
3404 	/* The 3-DH1 packet has between 2 and 85 information bytes
3405 	 * (including the 2-byte payload header)
3406 	 */
3407 	if (!(conn->pkt_type & HCI_3DH1))
3408 		chan->imtu = 83;
3409 
3410 	/* The 2-DH3 packet has between 2 and 369 information bytes
3411 	 * (including the 2-byte payload header)
3412 	 */
3413 	if (!(conn->pkt_type & HCI_2DH3))
3414 		chan->imtu = 367;
3415 
3416 	/* The 3-DH3 packet has between 2 and 554 information bytes
3417 	 * (including the 2-byte payload header)
3418 	 */
3419 	if (!(conn->pkt_type & HCI_3DH3))
3420 		chan->imtu = 552;
3421 
3422 	/* The 2-DH5 packet has between 2 and 681 information bytes
3423 	 * (including the 2-byte payload header)
3424 	 */
3425 	if (!(conn->pkt_type & HCI_2DH5))
3426 		chan->imtu = 679;
3427 
3428 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3429 	 * (including the 2-byte payload header)
3430 	 */
3431 	if (!(conn->pkt_type & HCI_3DH5))
3432 		chan->imtu = 1021;
3433 }
3434 
3435 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3436 {
3437 	struct l2cap_conf_req *req = data;
3438 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3439 	void *ptr = req->data;
3440 	void *endptr = data + data_size;
3441 	u16 size;
3442 
3443 	BT_DBG("chan %p", chan);
3444 
3445 	if (chan->num_conf_req || chan->num_conf_rsp)
3446 		goto done;
3447 
3448 	switch (chan->mode) {
3449 	case L2CAP_MODE_STREAMING:
3450 	case L2CAP_MODE_ERTM:
3451 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3452 			break;
3453 
3454 		if (__l2cap_efs_supported(chan->conn))
3455 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3456 
3457 		fallthrough;
3458 	default:
3459 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3460 		break;
3461 	}
3462 
3463 done:
3464 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3465 		if (!chan->imtu)
3466 			l2cap_mtu_auto(chan);
3467 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3468 				   endptr - ptr);
3469 	}
3470 
3471 	switch (chan->mode) {
3472 	case L2CAP_MODE_BASIC:
3473 		if (disable_ertm)
3474 			break;
3475 
3476 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3477 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3478 			break;
3479 
3480 		rfc.mode            = L2CAP_MODE_BASIC;
3481 		rfc.txwin_size      = 0;
3482 		rfc.max_transmit    = 0;
3483 		rfc.retrans_timeout = 0;
3484 		rfc.monitor_timeout = 0;
3485 		rfc.max_pdu_size    = 0;
3486 
3487 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3488 				   (unsigned long) &rfc, endptr - ptr);
3489 		break;
3490 
3491 	case L2CAP_MODE_ERTM:
3492 		rfc.mode            = L2CAP_MODE_ERTM;
3493 		rfc.max_transmit    = chan->max_tx;
3494 
3495 		__l2cap_set_ertm_timeouts(chan, &rfc);
3496 
3497 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3498 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3499 			     L2CAP_FCS_SIZE);
3500 		rfc.max_pdu_size = cpu_to_le16(size);
3501 
3502 		l2cap_txwin_setup(chan);
3503 
3504 		rfc.txwin_size = min_t(u16, chan->tx_win,
3505 				       L2CAP_DEFAULT_TX_WINDOW);
3506 
3507 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3508 				   (unsigned long) &rfc, endptr - ptr);
3509 
3510 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3511 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3512 
3513 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3514 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3515 					   chan->tx_win, endptr - ptr);
3516 
3517 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3518 			if (chan->fcs == L2CAP_FCS_NONE ||
3519 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3520 				chan->fcs = L2CAP_FCS_NONE;
3521 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3522 						   chan->fcs, endptr - ptr);
3523 			}
3524 		break;
3525 
3526 	case L2CAP_MODE_STREAMING:
3527 		l2cap_txwin_setup(chan);
3528 		rfc.mode            = L2CAP_MODE_STREAMING;
3529 		rfc.txwin_size      = 0;
3530 		rfc.max_transmit    = 0;
3531 		rfc.retrans_timeout = 0;
3532 		rfc.monitor_timeout = 0;
3533 
3534 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3535 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3536 			     L2CAP_FCS_SIZE);
3537 		rfc.max_pdu_size = cpu_to_le16(size);
3538 
3539 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3540 				   (unsigned long) &rfc, endptr - ptr);
3541 
3542 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3543 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3544 
3545 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3546 			if (chan->fcs == L2CAP_FCS_NONE ||
3547 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3548 				chan->fcs = L2CAP_FCS_NONE;
3549 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3550 						   chan->fcs, endptr - ptr);
3551 			}
3552 		break;
3553 	}
3554 
3555 	req->dcid  = cpu_to_le16(chan->dcid);
3556 	req->flags = cpu_to_le16(0);
3557 
3558 	return ptr - data;
3559 }
3560 
3561 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3562 {
3563 	struct l2cap_conf_rsp *rsp = data;
3564 	void *ptr = rsp->data;
3565 	void *endptr = data + data_size;
3566 	void *req = chan->conf_req;
3567 	int len = chan->conf_len;
3568 	int type, hint, olen;
3569 	unsigned long val;
3570 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3571 	struct l2cap_conf_efs efs;
3572 	u8 remote_efs = 0;
3573 	u16 mtu = L2CAP_DEFAULT_MTU;
3574 	u16 result = L2CAP_CONF_SUCCESS;
3575 	u16 size;
3576 
3577 	BT_DBG("chan %p", chan);
3578 
3579 	while (len >= L2CAP_CONF_OPT_SIZE) {
3580 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3581 		if (len < 0)
3582 			break;
3583 
3584 		hint  = type & L2CAP_CONF_HINT;
3585 		type &= L2CAP_CONF_MASK;
3586 
3587 		switch (type) {
3588 		case L2CAP_CONF_MTU:
3589 			if (olen != 2)
3590 				break;
3591 			mtu = val;
3592 			break;
3593 
3594 		case L2CAP_CONF_FLUSH_TO:
3595 			if (olen != 2)
3596 				break;
3597 			chan->flush_to = val;
3598 			break;
3599 
3600 		case L2CAP_CONF_QOS:
3601 			break;
3602 
3603 		case L2CAP_CONF_RFC:
3604 			if (olen != sizeof(rfc))
3605 				break;
3606 			memcpy(&rfc, (void *) val, olen);
3607 			break;
3608 
3609 		case L2CAP_CONF_FCS:
3610 			if (olen != 1)
3611 				break;
3612 			if (val == L2CAP_FCS_NONE)
3613 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3614 			break;
3615 
3616 		case L2CAP_CONF_EFS:
3617 			if (olen != sizeof(efs))
3618 				break;
3619 			remote_efs = 1;
3620 			memcpy(&efs, (void *) val, olen);
3621 			break;
3622 
3623 		case L2CAP_CONF_EWS:
3624 			if (olen != 2)
3625 				break;
3626 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3627 				return -ECONNREFUSED;
3628 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3629 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3630 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3631 			chan->remote_tx_win = val;
3632 			break;
3633 
3634 		default:
3635 			if (hint)
3636 				break;
3637 			result = L2CAP_CONF_UNKNOWN;
3638 			*((u8 *) ptr++) = type;
3639 			break;
3640 		}
3641 	}
3642 
3643 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3644 		goto done;
3645 
3646 	switch (chan->mode) {
3647 	case L2CAP_MODE_STREAMING:
3648 	case L2CAP_MODE_ERTM:
3649 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3650 			chan->mode = l2cap_select_mode(rfc.mode,
3651 						       chan->conn->feat_mask);
3652 			break;
3653 		}
3654 
3655 		if (remote_efs) {
3656 			if (__l2cap_efs_supported(chan->conn))
3657 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3658 			else
3659 				return -ECONNREFUSED;
3660 		}
3661 
3662 		if (chan->mode != rfc.mode)
3663 			return -ECONNREFUSED;
3664 
3665 		break;
3666 	}
3667 
3668 done:
3669 	if (chan->mode != rfc.mode) {
3670 		result = L2CAP_CONF_UNACCEPT;
3671 		rfc.mode = chan->mode;
3672 
3673 		if (chan->num_conf_rsp == 1)
3674 			return -ECONNREFUSED;
3675 
3676 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3677 				   (unsigned long) &rfc, endptr - ptr);
3678 	}
3679 
3680 	if (result == L2CAP_CONF_SUCCESS) {
3681 		/* Configure output options and let the other side know
3682 		 * which ones we don't like. */
3683 
3684 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3685 			result = L2CAP_CONF_UNACCEPT;
3686 		else {
3687 			chan->omtu = mtu;
3688 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3689 		}
3690 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3691 
3692 		if (remote_efs) {
3693 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3694 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3695 			    efs.stype != chan->local_stype) {
3696 
3697 				result = L2CAP_CONF_UNACCEPT;
3698 
3699 				if (chan->num_conf_req >= 1)
3700 					return -ECONNREFUSED;
3701 
3702 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3703 						   sizeof(efs),
3704 						   (unsigned long) &efs, endptr - ptr);
3705 			} else {
3706 				/* Send PENDING Conf Rsp */
3707 				result = L2CAP_CONF_PENDING;
3708 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3709 			}
3710 		}
3711 
3712 		switch (rfc.mode) {
3713 		case L2CAP_MODE_BASIC:
3714 			chan->fcs = L2CAP_FCS_NONE;
3715 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3716 			break;
3717 
3718 		case L2CAP_MODE_ERTM:
3719 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3720 				chan->remote_tx_win = rfc.txwin_size;
3721 			else
3722 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3723 
3724 			chan->remote_max_tx = rfc.max_transmit;
3725 
3726 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3727 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3728 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3729 			rfc.max_pdu_size = cpu_to_le16(size);
3730 			chan->remote_mps = size;
3731 
3732 			__l2cap_set_ertm_timeouts(chan, &rfc);
3733 
3734 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3735 
3736 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3737 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3738 
3739 			if (remote_efs &&
3740 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3741 				chan->remote_id = efs.id;
3742 				chan->remote_stype = efs.stype;
3743 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3744 				chan->remote_flush_to =
3745 					le32_to_cpu(efs.flush_to);
3746 				chan->remote_acc_lat =
3747 					le32_to_cpu(efs.acc_lat);
3748 				chan->remote_sdu_itime =
3749 					le32_to_cpu(efs.sdu_itime);
3750 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3751 						   sizeof(efs),
3752 						   (unsigned long) &efs, endptr - ptr);
3753 			}
3754 			break;
3755 
3756 		case L2CAP_MODE_STREAMING:
3757 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3758 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3759 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3760 			rfc.max_pdu_size = cpu_to_le16(size);
3761 			chan->remote_mps = size;
3762 
3763 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3764 
3765 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 					   (unsigned long) &rfc, endptr - ptr);
3767 
3768 			break;
3769 
3770 		default:
3771 			result = L2CAP_CONF_UNACCEPT;
3772 
3773 			memset(&rfc, 0, sizeof(rfc));
3774 			rfc.mode = chan->mode;
3775 		}
3776 
3777 		if (result == L2CAP_CONF_SUCCESS)
3778 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3779 	}
3780 	rsp->scid   = cpu_to_le16(chan->dcid);
3781 	rsp->result = cpu_to_le16(result);
3782 	rsp->flags  = cpu_to_le16(0);
3783 
3784 	return ptr - data;
3785 }
3786 
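/* Parse a Configuration Response from the remote side and build a new
 * Configuration Request in 'data' that adopts the values the remote
 * proposed where they are acceptable.  Returns the length of the request
 * or -ECONNREFUSED if the response cannot be reconciled with the local
 * channel settings.
 */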
3787 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3788 				void *data, size_t size, u16 *result)
3789 {
3790 	struct l2cap_conf_req *req = data;
3791 	void *ptr = req->data;
3792 	void *endptr = data + size;
3793 	int type, olen;
3794 	unsigned long val;
3795 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3796 	struct l2cap_conf_efs efs;
3797 
3798 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3799 
3800 	while (len >= L2CAP_CONF_OPT_SIZE) {
3801 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3802 		if (len < 0)
3803 			break;
3804 
3805 		switch (type) {
3806 		case L2CAP_CONF_MTU:
3807 			if (olen != 2)
3808 				break;
3809 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3810 				*result = L2CAP_CONF_UNACCEPT;
3811 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3812 			} else
3813 				chan->imtu = val;
3814 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3815 					   endptr - ptr);
3816 			break;
3817 
3818 		case L2CAP_CONF_FLUSH_TO:
3819 			if (olen != 2)
3820 				break;
3821 			chan->flush_to = val;
3822 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3823 					   chan->flush_to, endptr - ptr);
3824 			break;
3825 
3826 		case L2CAP_CONF_RFC:
3827 			if (olen != sizeof(rfc))
3828 				break;
3829 			memcpy(&rfc, (void *)val, olen);
3830 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3831 			    rfc.mode != chan->mode)
3832 				return -ECONNREFUSED;
3833 			chan->fcs = 0;
3834 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3835 					   (unsigned long) &rfc, endptr - ptr);
3836 			break;
3837 
3838 		case L2CAP_CONF_EWS:
3839 			if (olen != 2)
3840 				break;
3841 			chan->ack_win = min_t(u16, val, chan->ack_win);
3842 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3843 					   chan->tx_win, endptr - ptr);
3844 			break;
3845 
3846 		case L2CAP_CONF_EFS:
3847 			if (olen != sizeof(efs))
3848 				break;
3849 			memcpy(&efs, (void *)val, olen);
3850 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3851 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3852 			    efs.stype != chan->local_stype)
3853 				return -ECONNREFUSED;
3854 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3855 					   (unsigned long) &efs, endptr - ptr);
3856 			break;
3857 
3858 		case L2CAP_CONF_FCS:
3859 			if (olen != 1)
3860 				break;
3861 			if (*result == L2CAP_CONF_PENDING)
3862 				if (val == L2CAP_FCS_NONE)
3863 					set_bit(CONF_RECV_NO_FCS,
3864 						&chan->conf_state);
3865 			break;
3866 		}
3867 	}
3868 
3869 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3870 		return -ECONNREFUSED;
3871 
3872 	chan->mode = rfc.mode;
3873 
3874 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3875 		switch (rfc.mode) {
3876 		case L2CAP_MODE_ERTM:
3877 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3880 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3881 				chan->ack_win = min_t(u16, chan->ack_win,
3882 						      rfc.txwin_size);
3883 
3884 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3885 				chan->local_msdu = le16_to_cpu(efs.msdu);
3886 				chan->local_sdu_itime =
3887 					le32_to_cpu(efs.sdu_itime);
3888 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3889 				chan->local_flush_to =
3890 					le32_to_cpu(efs.flush_to);
3891 			}
3892 			break;
3893 
3894 		case L2CAP_MODE_STREAMING:
3895 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3896 		}
3897 	}
3898 
3899 	req->dcid   = cpu_to_le16(chan->dcid);
3900 	req->flags  = cpu_to_le16(0);
3901 
3902 	return ptr - data;
3903 }
3904 
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 				u16 result, u16 flags)
3907 {
3908 	struct l2cap_conf_rsp *rsp = data;
3909 	void *ptr = rsp->data;
3910 
3911 	BT_DBG("chan %p", chan);
3912 
3913 	rsp->scid   = cpu_to_le16(chan->dcid);
3914 	rsp->result = cpu_to_le16(result);
3915 	rsp->flags  = cpu_to_le16(flags);
3916 
3917 	return ptr - data;
3918 }
3919 
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3921 {
3922 	struct l2cap_le_conn_rsp rsp;
3923 	struct l2cap_conn *conn = chan->conn;
3924 
3925 	BT_DBG("chan %p", chan);
3926 
3927 	rsp.dcid    = cpu_to_le16(chan->scid);
3928 	rsp.mtu     = cpu_to_le16(chan->imtu);
3929 	rsp.mps     = cpu_to_le16(chan->mps);
3930 	rsp.credits = cpu_to_le16(chan->rx_credits);
3931 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3932 
3933 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3934 		       &rsp);
3935 }
3936 
3937 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3938 {
3939 	int *result = data;
3940 
3941 	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3942 		return;
3943 
3944 	switch (chan->state) {
3945 	case BT_CONNECT2:
3946 		/* If the channel is still pending accept, add it to the result */
3947 		(*result)++;
3948 		return;
3949 	case BT_CONNECTED:
3950 		return;
3951 	default:
3952 		/* If not connected or pending accept, it has been refused */
3953 		*result = -ECONNREFUSED;
3954 		return;
3955 	}
3956 }
3957 
3958 struct l2cap_ecred_rsp_data {
3959 	struct {
3960 		struct l2cap_ecred_conn_rsp rsp;
3961 		__le16 scid[L2CAP_ECRED_MAX_CID];
3962 	} __packed pdu;
3963 	int count;
3964 };
3965 
3966 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3967 {
3968 	struct l2cap_ecred_rsp_data *rsp = data;
3969 
3970 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3971 		return;
3972 
3973 	/* Reset ident so only one response is sent */
3974 	chan->ident = 0;
3975 
3976 	/* Include all channels pending with the same ident */
3977 	if (!rsp->pdu.rsp.result)
3978 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3979 	else
3980 		l2cap_chan_del(chan, ECONNRESET);
3981 }
3982 
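/* Send a single deferred Enhanced Credit Based connection response that
 * covers every channel sharing this command ident.  The response is held
 * back while any of those channels is still pending accept; if one of
 * them has been refused, the whole request is answered with an
 * authorization failure.
 */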
3983 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3984 {
3985 	struct l2cap_conn *conn = chan->conn;
3986 	struct l2cap_ecred_rsp_data data;
3987 	u16 id = chan->ident;
3988 	int result = 0;
3989 
3990 	if (!id)
3991 		return;
3992 
3993 	BT_DBG("chan %p id %d", chan, id);
3994 
3995 	memset(&data, 0, sizeof(data));
3996 
3997 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3998 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3999 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4000 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4001 
4002 	/* Verify that all channels are ready */
4003 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4004 
4005 	if (result > 0)
4006 		return;
4007 
4008 	if (result < 0)
4009 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4010 
4011 	/* Build response */
4012 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4013 
4014 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4015 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
4016 		       &data.pdu);
4017 }
4018 
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4020 {
4021 	struct l2cap_conn_rsp rsp;
4022 	struct l2cap_conn *conn = chan->conn;
4023 	u8 buf[128];
4024 	u8 rsp_code;
4025 
4026 	rsp.scid   = cpu_to_le16(chan->dcid);
4027 	rsp.dcid   = cpu_to_le16(chan->scid);
4028 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4030 
4031 	if (chan->hs_hcon)
4032 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4033 	else
4034 		rsp_code = L2CAP_CONN_RSP;
4035 
4036 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4037 
4038 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4039 
4040 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4041 		return;
4042 
4043 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 	chan->num_conf_req++;
4046 }
4047 
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4049 {
4050 	int type, olen;
4051 	unsigned long val;
4052 	/* Use sane default values in case a misbehaving remote device
4053 	 * did not send an RFC or extended window size option.
4054 	 */
4055 	u16 txwin_ext = chan->ack_win;
4056 	struct l2cap_conf_rfc rfc = {
4057 		.mode = chan->mode,
4058 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 		.max_pdu_size = cpu_to_le16(chan->imtu),
4061 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4062 	};
4063 
4064 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4065 
4066 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4067 		return;
4068 
4069 	while (len >= L2CAP_CONF_OPT_SIZE) {
4070 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4071 		if (len < 0)
4072 			break;
4073 
4074 		switch (type) {
4075 		case L2CAP_CONF_RFC:
4076 			if (olen != sizeof(rfc))
4077 				break;
4078 			memcpy(&rfc, (void *)val, olen);
4079 			break;
4080 		case L2CAP_CONF_EWS:
4081 			if (olen != 2)
4082 				break;
4083 			txwin_ext = val;
4084 			break;
4085 		}
4086 	}
4087 
4088 	switch (rfc.mode) {
4089 	case L2CAP_MODE_ERTM:
4090 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4095 		else
4096 			chan->ack_win = min_t(u16, chan->ack_win,
4097 					      rfc.txwin_size);
4098 		break;
4099 	case L2CAP_MODE_STREAMING:
4100 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4101 	}
4102 }
4103 
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4106 				    u8 *data)
4107 {
4108 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4109 
4110 	if (cmd_len < sizeof(*rej))
4111 		return -EPROTO;
4112 
4113 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4114 		return 0;
4115 
4116 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 	    cmd->ident == conn->info_ident) {
4118 		cancel_delayed_work(&conn->info_timer);
4119 
4120 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 		conn->info_ident = 0;
4122 
4123 		l2cap_conn_start(conn);
4124 	}
4125 
4126 	return 0;
4127 }
4128 
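/* Handle an incoming Connection Request (or AMP Create Channel Request):
 * look up a listening channel for the requested PSM, check link security,
 * allocate a new channel and reply with the matching response code.
 */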
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 					struct l2cap_cmd_hdr *cmd,
4131 					u8 *data, u8 rsp_code, u8 amp_id)
4132 {
4133 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 	struct l2cap_conn_rsp rsp;
4135 	struct l2cap_chan *chan = NULL, *pchan;
4136 	int result, status = L2CAP_CS_NO_INFO;
4137 
4138 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 	__le16 psm = req->psm;
4140 
4141 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4142 
4143 	/* Check if we have socket listening on psm */
4144 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 					 &conn->hcon->dst, ACL_LINK);
4146 	if (!pchan) {
4147 		result = L2CAP_CR_BAD_PSM;
4148 		goto sendresp;
4149 	}
4150 
4151 	mutex_lock(&conn->chan_lock);
4152 	l2cap_chan_lock(pchan);
4153 
4154 	/* Check if the ACL is secure enough (if not SDP) */
4155 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 	    !hci_conn_check_link_mode(conn->hcon)) {
4157 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 		result = L2CAP_CR_SEC_BLOCK;
4159 		goto response;
4160 	}
4161 
4162 	result = L2CAP_CR_NO_MEM;
4163 
4164 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4165 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 		result = L2CAP_CR_INVALID_SCID;
4167 		goto response;
4168 	}
4169 
4170 	/* Check if we already have channel with that dcid */
4171 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 		result = L2CAP_CR_SCID_IN_USE;
4173 		goto response;
4174 	}
4175 
4176 	chan = pchan->ops->new_connection(pchan);
4177 	if (!chan)
4178 		goto response;
4179 
4180 	/* For certain devices (ex: HID mouse), support for authentication,
4181 	 * pairing and bonding is optional. For such devices, in order to avoid
4182 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4183 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4184 	 */
4185 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4186 
4187 	bacpy(&chan->src, &conn->hcon->src);
4188 	bacpy(&chan->dst, &conn->hcon->dst);
4189 	chan->src_type = bdaddr_src_type(conn->hcon);
4190 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4191 	chan->psm  = psm;
4192 	chan->dcid = scid;
4193 	chan->local_amp_id = amp_id;
4194 
4195 	__l2cap_chan_add(conn, chan);
4196 
4197 	dcid = chan->scid;
4198 
4199 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4200 
4201 	chan->ident = cmd->ident;
4202 
4203 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 		if (l2cap_chan_check_security(chan, false)) {
4205 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 				l2cap_state_change(chan, BT_CONNECT2);
4207 				result = L2CAP_CR_PEND;
4208 				status = L2CAP_CS_AUTHOR_PEND;
4209 				chan->ops->defer(chan);
4210 			} else {
4211 				/* Force pending result for AMP controllers.
4212 				 * The connection will succeed after the
4213 				 * physical link is up.
4214 				 */
4215 				if (amp_id == AMP_ID_BREDR) {
4216 					l2cap_state_change(chan, BT_CONFIG);
4217 					result = L2CAP_CR_SUCCESS;
4218 				} else {
4219 					l2cap_state_change(chan, BT_CONNECT2);
4220 					result = L2CAP_CR_PEND;
4221 				}
4222 				status = L2CAP_CS_NO_INFO;
4223 			}
4224 		} else {
4225 			l2cap_state_change(chan, BT_CONNECT2);
4226 			result = L2CAP_CR_PEND;
4227 			status = L2CAP_CS_AUTHEN_PEND;
4228 		}
4229 	} else {
4230 		l2cap_state_change(chan, BT_CONNECT2);
4231 		result = L2CAP_CR_PEND;
4232 		status = L2CAP_CS_NO_INFO;
4233 	}
4234 
4235 response:
4236 	l2cap_chan_unlock(pchan);
4237 	mutex_unlock(&conn->chan_lock);
4238 	l2cap_chan_put(pchan);
4239 
4240 sendresp:
4241 	rsp.scid   = cpu_to_le16(scid);
4242 	rsp.dcid   = cpu_to_le16(dcid);
4243 	rsp.result = cpu_to_le16(result);
4244 	rsp.status = cpu_to_le16(status);
4245 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4246 
4247 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 		struct l2cap_info_req info;
4249 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4250 
4251 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 		conn->info_ident = l2cap_get_ident(conn);
4253 
4254 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4255 
4256 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 			       sizeof(info), &info);
4258 	}
4259 
4260 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 	    result == L2CAP_CR_SUCCESS) {
4262 		u8 buf[128];
4263 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 		chan->num_conf_req++;
4267 	}
4268 
4269 	return chan;
4270 }
4271 
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4274 {
4275 	struct hci_dev *hdev = conn->hcon->hdev;
4276 	struct hci_conn *hcon = conn->hcon;
4277 
4278 	if (cmd_len < sizeof(struct l2cap_conn_req))
4279 		return -EPROTO;
4280 
4281 	hci_dev_lock(hdev);
4282 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4283 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4284 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4285 	hci_dev_unlock(hdev);
4286 
4287 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4288 	return 0;
4289 }
4290 
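/* Handle a Connection Response or Create Channel Response: locate the
 * pending channel by source CID or command ident, then either move it to
 * BT_CONFIG and send the first Configuration Request, leave it pending,
 * or tear it down if the request was refused.
 */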
4291 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4292 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4293 				    u8 *data)
4294 {
4295 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4296 	u16 scid, dcid, result, status;
4297 	struct l2cap_chan *chan;
4298 	u8 req[128];
4299 	int err;
4300 
4301 	if (cmd_len < sizeof(*rsp))
4302 		return -EPROTO;
4303 
4304 	scid   = __le16_to_cpu(rsp->scid);
4305 	dcid   = __le16_to_cpu(rsp->dcid);
4306 	result = __le16_to_cpu(rsp->result);
4307 	status = __le16_to_cpu(rsp->status);
4308 
4309 	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4310 					   dcid > L2CAP_CID_DYN_END))
4311 		return -EPROTO;
4312 
4313 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4314 	       dcid, scid, result, status);
4315 
4316 	mutex_lock(&conn->chan_lock);
4317 
4318 	if (scid) {
4319 		chan = __l2cap_get_chan_by_scid(conn, scid);
4320 		if (!chan) {
4321 			err = -EBADSLT;
4322 			goto unlock;
4323 		}
4324 	} else {
4325 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4326 		if (!chan) {
4327 			err = -EBADSLT;
4328 			goto unlock;
4329 		}
4330 	}
4331 
4332 	chan = l2cap_chan_hold_unless_zero(chan);
4333 	if (!chan) {
4334 		err = -EBADSLT;
4335 		goto unlock;
4336 	}
4337 
4338 	err = 0;
4339 
4340 	l2cap_chan_lock(chan);
4341 
4342 	switch (result) {
4343 	case L2CAP_CR_SUCCESS:
4344 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4345 			err = -EBADSLT;
4346 			break;
4347 		}
4348 
4349 		l2cap_state_change(chan, BT_CONFIG);
4350 		chan->ident = 0;
4351 		chan->dcid = dcid;
4352 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4353 
4354 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4355 			break;
4356 
4357 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4358 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4359 		chan->num_conf_req++;
4360 		break;
4361 
4362 	case L2CAP_CR_PEND:
4363 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4364 		break;
4365 
4366 	default:
4367 		l2cap_chan_del(chan, ECONNREFUSED);
4368 		break;
4369 	}
4370 
4371 	l2cap_chan_unlock(chan);
4372 	l2cap_chan_put(chan);
4373 
4374 unlock:
4375 	mutex_unlock(&conn->chan_lock);
4376 
4377 	return err;
4378 }
4379 
4380 static inline void set_default_fcs(struct l2cap_chan *chan)
4381 {
4382 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4383 	 * sides request it.
4384 	 */
4385 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4386 		chan->fcs = L2CAP_FCS_NONE;
4387 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4388 		chan->fcs = L2CAP_FCS_CRC16;
4389 }
4390 
4391 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4392 				    u8 ident, u16 flags)
4393 {
4394 	struct l2cap_conn *conn = chan->conn;
4395 
4396 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4397 	       flags);
4398 
4399 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4400 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4401 
4402 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4403 		       l2cap_build_conf_rsp(chan, data,
4404 					    L2CAP_CONF_SUCCESS, flags), data);
4405 }
4406 
4407 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4408 				   u16 scid, u16 dcid)
4409 {
4410 	struct l2cap_cmd_rej_cid rej;
4411 
4412 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4413 	rej.scid = __cpu_to_le16(scid);
4414 	rej.dcid = __cpu_to_le16(dcid);
4415 
4416 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4417 }
4418 
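/* Handle a Configuration Request.  Options may be split across several
 * commands using the continuation flag, so they are accumulated in
 * chan->conf_req until the final fragment arrives, then parsed and
 * answered with a Configuration Response.
 */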
4419 static inline int l2cap_config_req(struct l2cap_conn *conn,
4420 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4421 				   u8 *data)
4422 {
4423 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4424 	u16 dcid, flags;
4425 	u8 rsp[64];
4426 	struct l2cap_chan *chan;
4427 	int len, err = 0;
4428 
4429 	if (cmd_len < sizeof(*req))
4430 		return -EPROTO;
4431 
4432 	dcid  = __le16_to_cpu(req->dcid);
4433 	flags = __le16_to_cpu(req->flags);
4434 
4435 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4436 
4437 	chan = l2cap_get_chan_by_scid(conn, dcid);
4438 	if (!chan) {
4439 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4440 		return 0;
4441 	}
4442 
4443 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4444 	    chan->state != BT_CONNECTED) {
4445 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4446 				       chan->dcid);
4447 		goto unlock;
4448 	}
4449 
4450 	/* Reject if config buffer is too small. */
4451 	len = cmd_len - sizeof(*req);
4452 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4453 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4454 			       l2cap_build_conf_rsp(chan, rsp,
4455 			       L2CAP_CONF_REJECT, flags), rsp);
4456 		goto unlock;
4457 	}
4458 
4459 	/* Store config. */
4460 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4461 	chan->conf_len += len;
4462 
4463 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4464 		/* Incomplete config. Send empty response. */
4465 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4466 			       l2cap_build_conf_rsp(chan, rsp,
4467 			       L2CAP_CONF_SUCCESS, flags), rsp);
4468 		goto unlock;
4469 	}
4470 
4471 	/* Complete config. */
4472 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4473 	if (len < 0) {
4474 		l2cap_send_disconn_req(chan, ECONNRESET);
4475 		goto unlock;
4476 	}
4477 
4478 	chan->ident = cmd->ident;
4479 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4480 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4481 		chan->num_conf_rsp++;
4482 
4483 	/* Reset config buffer. */
4484 	chan->conf_len = 0;
4485 
4486 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4487 		goto unlock;
4488 
4489 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4490 		set_default_fcs(chan);
4491 
4492 		if (chan->mode == L2CAP_MODE_ERTM ||
4493 		    chan->mode == L2CAP_MODE_STREAMING)
4494 			err = l2cap_ertm_init(chan);
4495 
4496 		if (err < 0)
4497 			l2cap_send_disconn_req(chan, -err);
4498 		else
4499 			l2cap_chan_ready(chan);
4500 
4501 		goto unlock;
4502 	}
4503 
4504 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4505 		u8 buf[64];
4506 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4507 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4508 		chan->num_conf_req++;
4509 	}
4510 
4511 	/* Got Conf Rsp PENDING from the remote side and assume we sent
4512 	 * Conf Rsp PENDING in the code above */
4513 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4514 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4515 
4516 		/* check compatibility */
4517 
4518 		/* Send rsp for BR/EDR channel */
4519 		if (!chan->hs_hcon)
4520 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4521 		else
4522 			chan->ident = cmd->ident;
4523 	}
4524 
4525 unlock:
4526 	l2cap_chan_unlock(chan);
4527 	l2cap_chan_put(chan);
4528 	return err;
4529 }
4530 
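/* Handle a Configuration Response.  On success record the negotiated RFC
 * parameters; on an unacceptable-parameters result rebuild and resend the
 * Configuration Request with the remote's counter proposals; otherwise
 * disconnect.  The channel becomes ready once both directions are done.
 */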
4531 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4532 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4533 				   u8 *data)
4534 {
4535 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4536 	u16 scid, flags, result;
4537 	struct l2cap_chan *chan;
4538 	int len = cmd_len - sizeof(*rsp);
4539 	int err = 0;
4540 
4541 	if (cmd_len < sizeof(*rsp))
4542 		return -EPROTO;
4543 
4544 	scid   = __le16_to_cpu(rsp->scid);
4545 	flags  = __le16_to_cpu(rsp->flags);
4546 	result = __le16_to_cpu(rsp->result);
4547 
4548 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4549 	       result, len);
4550 
4551 	chan = l2cap_get_chan_by_scid(conn, scid);
4552 	if (!chan)
4553 		return 0;
4554 
4555 	switch (result) {
4556 	case L2CAP_CONF_SUCCESS:
4557 		l2cap_conf_rfc_get(chan, rsp->data, len);
4558 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4559 		break;
4560 
4561 	case L2CAP_CONF_PENDING:
4562 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4563 
4564 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4565 			char buf[64];
4566 
4567 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4568 						   buf, sizeof(buf), &result);
4569 			if (len < 0) {
4570 				l2cap_send_disconn_req(chan, ECONNRESET);
4571 				goto done;
4572 			}
4573 
4574 			if (!chan->hs_hcon) {
4575 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4576 							0);
4577 			} else {
4578 				if (l2cap_check_efs(chan)) {
4579 					amp_create_logical_link(chan);
4580 					chan->ident = cmd->ident;
4581 				}
4582 			}
4583 		}
4584 		goto done;
4585 
4586 	case L2CAP_CONF_UNACCEPT:
4587 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4588 			char req[64];
4589 
4590 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4591 				l2cap_send_disconn_req(chan, ECONNRESET);
4592 				goto done;
4593 			}
4594 
4595 			/* throw out any old stored conf requests */
4596 			result = L2CAP_CONF_SUCCESS;
4597 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4598 						   req, sizeof(req), &result);
4599 			if (len < 0) {
4600 				l2cap_send_disconn_req(chan, ECONNRESET);
4601 				goto done;
4602 			}
4603 
4604 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4605 				       L2CAP_CONF_REQ, len, req);
4606 			chan->num_conf_req++;
4607 			if (result != L2CAP_CONF_SUCCESS)
4608 				goto done;
4609 			break;
4610 		}
4611 		fallthrough;
4612 
4613 	default:
4614 		l2cap_chan_set_err(chan, ECONNRESET);
4615 
4616 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4617 		l2cap_send_disconn_req(chan, ECONNRESET);
4618 		goto done;
4619 	}
4620 
4621 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4622 		goto done;
4623 
4624 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4625 
4626 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4627 		set_default_fcs(chan);
4628 
4629 		if (chan->mode == L2CAP_MODE_ERTM ||
4630 		    chan->mode == L2CAP_MODE_STREAMING)
4631 			err = l2cap_ertm_init(chan);
4632 
4633 		if (err < 0)
4634 			l2cap_send_disconn_req(chan, -err);
4635 		else
4636 			l2cap_chan_ready(chan);
4637 	}
4638 
4639 done:
4640 	l2cap_chan_unlock(chan);
4641 	l2cap_chan_put(chan);
4642 	return err;
4643 }
4644 
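/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.
 */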
4645 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4646 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4647 				       u8 *data)
4648 {
4649 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4650 	struct l2cap_disconn_rsp rsp;
4651 	u16 dcid, scid;
4652 	struct l2cap_chan *chan;
4653 
4654 	if (cmd_len != sizeof(*req))
4655 		return -EPROTO;
4656 
4657 	scid = __le16_to_cpu(req->scid);
4658 	dcid = __le16_to_cpu(req->dcid);
4659 
4660 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4661 
4662 	chan = l2cap_get_chan_by_scid(conn, dcid);
4663 	if (!chan) {
4664 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4665 		return 0;
4666 	}
4667 
4668 	rsp.dcid = cpu_to_le16(chan->scid);
4669 	rsp.scid = cpu_to_le16(chan->dcid);
4670 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4671 
4672 	chan->ops->set_shutdown(chan);
4673 
4674 	l2cap_chan_unlock(chan);
4675 	mutex_lock(&conn->chan_lock);
4676 	l2cap_chan_lock(chan);
4677 	l2cap_chan_del(chan, ECONNRESET);
4678 	mutex_unlock(&conn->chan_lock);
4679 
4680 	chan->ops->close(chan);
4681 
4682 	l2cap_chan_unlock(chan);
4683 	l2cap_chan_put(chan);
4684 
4685 	return 0;
4686 }
4687 
4688 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4689 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4690 				       u8 *data)
4691 {
4692 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4693 	u16 dcid, scid;
4694 	struct l2cap_chan *chan;
4695 
4696 	if (cmd_len != sizeof(*rsp))
4697 		return -EPROTO;
4698 
4699 	scid = __le16_to_cpu(rsp->scid);
4700 	dcid = __le16_to_cpu(rsp->dcid);
4701 
4702 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4703 
4704 	chan = l2cap_get_chan_by_scid(conn, scid);
4705 	if (!chan) {
4706 		return 0;
4707 	}
4708 
4709 	if (chan->state != BT_DISCONN) {
4710 		l2cap_chan_unlock(chan);
4711 		l2cap_chan_put(chan);
4712 		return 0;
4713 	}
4714 
4715 	l2cap_chan_unlock(chan);
4716 	mutex_lock(&conn->chan_lock);
4717 	l2cap_chan_lock(chan);
4718 	l2cap_chan_del(chan, 0);
4719 	mutex_unlock(&conn->chan_lock);
4720 
4721 	chan->ops->close(chan);
4722 
4723 	l2cap_chan_unlock(chan);
4724 	l2cap_chan_put(chan);
4725 
4726 	return 0;
4727 }
4728 
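/* Handle an Information Request: report the extended feature mask or the
 * fixed channel map, or answer with "not supported" for unknown types.
 */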
4729 static inline int l2cap_information_req(struct l2cap_conn *conn,
4730 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4731 					u8 *data)
4732 {
4733 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4734 	u16 type;
4735 
4736 	if (cmd_len != sizeof(*req))
4737 		return -EPROTO;
4738 
4739 	type = __le16_to_cpu(req->type);
4740 
4741 	BT_DBG("type 0x%4.4x", type);
4742 
4743 	if (type == L2CAP_IT_FEAT_MASK) {
4744 		u8 buf[8];
4745 		u32 feat_mask = l2cap_feat_mask;
4746 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4747 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4748 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4749 		if (!disable_ertm)
4750 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4751 				| L2CAP_FEAT_FCS;
4752 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4753 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4754 				| L2CAP_FEAT_EXT_WINDOW;
4755 
4756 		put_unaligned_le32(feat_mask, rsp->data);
4757 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4758 			       buf);
4759 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4760 		u8 buf[12];
4761 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4762 
4763 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4764 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4765 		rsp->data[0] = conn->local_fixed_chan;
4766 		memset(rsp->data + 1, 0, 7);
4767 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4768 			       buf);
4769 	} else {
4770 		struct l2cap_info_rsp rsp;
4771 		rsp.type   = cpu_to_le16(type);
4772 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4773 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4774 			       &rsp);
4775 	}
4776 
4777 	return 0;
4778 }
4779 
4780 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4781 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4782 					u8 *data)
4783 {
4784 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4785 	u16 type, result;
4786 
4787 	if (cmd_len < sizeof(*rsp))
4788 		return -EPROTO;
4789 
4790 	type   = __le16_to_cpu(rsp->type);
4791 	result = __le16_to_cpu(rsp->result);
4792 
4793 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4794 
4795 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4796 	if (cmd->ident != conn->info_ident ||
4797 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4798 		return 0;
4799 
4800 	cancel_delayed_work(&conn->info_timer);
4801 
4802 	if (result != L2CAP_IR_SUCCESS) {
4803 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4804 		conn->info_ident = 0;
4805 
4806 		l2cap_conn_start(conn);
4807 
4808 		return 0;
4809 	}
4810 
4811 	switch (type) {
4812 	case L2CAP_IT_FEAT_MASK:
4813 		conn->feat_mask = get_unaligned_le32(rsp->data);
4814 
4815 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4816 			struct l2cap_info_req req;
4817 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4818 
4819 			conn->info_ident = l2cap_get_ident(conn);
4820 
4821 			l2cap_send_cmd(conn, conn->info_ident,
4822 				       L2CAP_INFO_REQ, sizeof(req), &req);
4823 		} else {
4824 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4825 			conn->info_ident = 0;
4826 
4827 			l2cap_conn_start(conn);
4828 		}
4829 		break;
4830 
4831 	case L2CAP_IT_FIXED_CHAN:
4832 		conn->remote_fixed_chan = rsp->data[0];
4833 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4834 		conn->info_ident = 0;
4835 
4836 		l2cap_conn_start(conn);
4837 		break;
4838 	}
4839 
4840 	return 0;
4841 }
4842 
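/* Handle an AMP Create Channel Request: controller id 0 falls back to a
 * plain BR/EDR connection, otherwise the AMP controller id is validated
 * and the new channel is associated with the high speed link.
 */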
4843 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4844 				    struct l2cap_cmd_hdr *cmd,
4845 				    u16 cmd_len, void *data)
4846 {
4847 	struct l2cap_create_chan_req *req = data;
4848 	struct l2cap_create_chan_rsp rsp;
4849 	struct l2cap_chan *chan;
4850 	struct hci_dev *hdev;
4851 	u16 psm, scid;
4852 
4853 	if (cmd_len != sizeof(*req))
4854 		return -EPROTO;
4855 
4856 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4857 		return -EINVAL;
4858 
4859 	psm = le16_to_cpu(req->psm);
4860 	scid = le16_to_cpu(req->scid);
4861 
4862 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4863 
4864 	/* For controller id 0 make BR/EDR connection */
4865 	if (req->amp_id == AMP_ID_BREDR) {
4866 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4867 			      req->amp_id);
4868 		return 0;
4869 	}
4870 
4871 	/* Validate AMP controller id */
4872 	hdev = hci_dev_get(req->amp_id);
4873 	if (!hdev)
4874 		goto error;
4875 
4876 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4877 		hci_dev_put(hdev);
4878 		goto error;
4879 	}
4880 
4881 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4882 			     req->amp_id);
4883 	if (chan) {
4884 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4885 		struct hci_conn *hs_hcon;
4886 
4887 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4888 						  &conn->hcon->dst);
4889 		if (!hs_hcon) {
4890 			hci_dev_put(hdev);
4891 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4892 					       chan->dcid);
4893 			return 0;
4894 		}
4895 
4896 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4897 
4898 		mgr->bredr_chan = chan;
4899 		chan->hs_hcon = hs_hcon;
4900 		chan->fcs = L2CAP_FCS_NONE;
4901 		conn->mtu = hdev->block_mtu;
4902 	}
4903 
4904 	hci_dev_put(hdev);
4905 
4906 	return 0;
4907 
4908 error:
4909 	rsp.dcid = 0;
4910 	rsp.scid = cpu_to_le16(scid);
4911 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4912 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4913 
4914 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4915 		       sizeof(rsp), &rsp);
4916 
4917 	return 0;
4918 }
4919 
4920 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4921 {
4922 	struct l2cap_move_chan_req req;
4923 	u8 ident;
4924 
4925 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4926 
4927 	ident = l2cap_get_ident(chan->conn);
4928 	chan->ident = ident;
4929 
4930 	req.icid = cpu_to_le16(chan->scid);
4931 	req.dest_amp_id = dest_amp_id;
4932 
4933 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4934 		       &req);
4935 
4936 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4937 }
4938 
4939 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4940 {
4941 	struct l2cap_move_chan_rsp rsp;
4942 
4943 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4944 
4945 	rsp.icid = cpu_to_le16(chan->dcid);
4946 	rsp.result = cpu_to_le16(result);
4947 
4948 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4949 		       sizeof(rsp), &rsp);
4950 }
4951 
4952 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4953 {
4954 	struct l2cap_move_chan_cfm cfm;
4955 
4956 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4957 
4958 	chan->ident = l2cap_get_ident(chan->conn);
4959 
4960 	cfm.icid = cpu_to_le16(chan->scid);
4961 	cfm.result = cpu_to_le16(result);
4962 
4963 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4964 		       sizeof(cfm), &cfm);
4965 
4966 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4967 }
4968 
4969 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4970 {
4971 	struct l2cap_move_chan_cfm cfm;
4972 
4973 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4974 
4975 	cfm.icid = cpu_to_le16(icid);
4976 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4977 
4978 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4979 		       sizeof(cfm), &cfm);
4980 }
4981 
4982 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4983 					 u16 icid)
4984 {
4985 	struct l2cap_move_chan_cfm_rsp rsp;
4986 
4987 	BT_DBG("icid 0x%4.4x", icid);
4988 
4989 	rsp.icid = cpu_to_le16(icid);
4990 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4991 }
4992 
4993 static void __release_logical_link(struct l2cap_chan *chan)
4994 {
4995 	chan->hs_hchan = NULL;
4996 	chan->hs_hcon = NULL;
4997 
4998 	/* Placeholder - release the logical link */
4999 }
5000 
5001 static void l2cap_logical_fail(struct l2cap_chan *chan)
5002 {
5003 	/* Logical link setup failed */
5004 	if (chan->state != BT_CONNECTED) {
5005 		/* Create channel failure, disconnect */
5006 		l2cap_send_disconn_req(chan, ECONNRESET);
5007 		return;
5008 	}
5009 
5010 	switch (chan->move_role) {
5011 	case L2CAP_MOVE_ROLE_RESPONDER:
5012 		l2cap_move_done(chan);
5013 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5014 		break;
5015 	case L2CAP_MOVE_ROLE_INITIATOR:
5016 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5017 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5018 			/* Remote has only sent pending or
5019 			 * success responses, clean up
5020 			 */
5021 			l2cap_move_done(chan);
5022 		}
5023 
5024 		/* Other amp move states imply that the move
5025 		 * has already aborted
5026 		 */
5027 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5028 		break;
5029 	}
5030 }
5031 
5032 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5033 					struct hci_chan *hchan)
5034 {
5035 	struct l2cap_conf_rsp rsp;
5036 
5037 	chan->hs_hchan = hchan;
5038 	chan->hs_hcon->l2cap_data = chan->conn;
5039 
5040 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5041 
5042 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5043 		int err;
5044 
5045 		set_default_fcs(chan);
5046 
5047 		err = l2cap_ertm_init(chan);
5048 		if (err < 0)
5049 			l2cap_send_disconn_req(chan, -err);
5050 		else
5051 			l2cap_chan_ready(chan);
5052 	}
5053 }
5054 
5055 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5056 				      struct hci_chan *hchan)
5057 {
5058 	chan->hs_hcon = hchan->conn;
5059 	chan->hs_hcon->l2cap_data = chan->conn;
5060 
5061 	BT_DBG("move_state %d", chan->move_state);
5062 
5063 	switch (chan->move_state) {
5064 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5065 		/* Move confirm will be sent after a success
5066 		 * response is received
5067 		 */
5068 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5069 		break;
5070 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5071 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5072 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5073 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5074 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5075 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5076 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5077 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5078 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5079 		}
5080 		break;
5081 	default:
5082 		/* Move was not in expected state, free the channel */
5083 		__release_logical_link(chan);
5084 
5085 		chan->move_state = L2CAP_MOVE_STABLE;
5086 	}
5087 }
5088 
5089 /* Call with chan locked */
5090 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5091 		       u8 status)
5092 {
5093 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5094 
5095 	if (status) {
5096 		l2cap_logical_fail(chan);
5097 		__release_logical_link(chan);
5098 		return;
5099 	}
5100 
5101 	if (chan->state != BT_CONNECTED) {
5102 		/* Ignore logical link if channel is on BR/EDR */
5103 		if (chan->local_amp_id != AMP_ID_BREDR)
5104 			l2cap_logical_finish_create(chan, hchan);
5105 	} else {
5106 		l2cap_logical_finish_move(chan, hchan);
5107 	}
5108 }
5109 
5110 void l2cap_move_start(struct l2cap_chan *chan)
5111 {
5112 	BT_DBG("chan %p", chan);
5113 
5114 	if (chan->local_amp_id == AMP_ID_BREDR) {
5115 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5116 			return;
5117 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5118 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5119 		/* Placeholder - start physical link setup */
5120 	} else {
5121 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5122 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5123 		chan->move_id = 0;
5124 		l2cap_move_setup(chan);
5125 		l2cap_send_move_chan_req(chan, 0);
5126 	}
5127 }
5128 
5129 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5130 			    u8 local_amp_id, u8 remote_amp_id)
5131 {
5132 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5133 	       local_amp_id, remote_amp_id);
5134 
5135 	chan->fcs = L2CAP_FCS_NONE;
5136 
5137 	/* Outgoing channel on AMP */
5138 	if (chan->state == BT_CONNECT) {
5139 		if (result == L2CAP_CR_SUCCESS) {
5140 			chan->local_amp_id = local_amp_id;
5141 			l2cap_send_create_chan_req(chan, remote_amp_id);
5142 		} else {
5143 			/* Revert to BR/EDR connect */
5144 			l2cap_send_conn_req(chan);
5145 		}
5146 
5147 		return;
5148 	}
5149 
5150 	/* Incoming channel on AMP */
5151 	if (__l2cap_no_conn_pending(chan)) {
5152 		struct l2cap_conn_rsp rsp;
5153 		char buf[128];
5154 		rsp.scid = cpu_to_le16(chan->dcid);
5155 		rsp.dcid = cpu_to_le16(chan->scid);
5156 
5157 		if (result == L2CAP_CR_SUCCESS) {
5158 			/* Send successful response */
5159 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5160 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5161 		} else {
5162 			/* Send negative response */
5163 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5164 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5165 		}
5166 
5167 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5168 			       sizeof(rsp), &rsp);
5169 
5170 		if (result == L2CAP_CR_SUCCESS) {
5171 			l2cap_state_change(chan, BT_CONFIG);
5172 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5173 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5174 				       L2CAP_CONF_REQ,
5175 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5176 			chan->num_conf_req++;
5177 		}
5178 	}
5179 }
5180 
5181 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5182 				   u8 remote_amp_id)
5183 {
5184 	l2cap_move_setup(chan);
5185 	chan->move_id = local_amp_id;
5186 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5187 
5188 	l2cap_send_move_chan_req(chan, remote_amp_id);
5189 }
5190 
5191 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5192 {
5193 	struct hci_chan *hchan = NULL;
5194 
5195 	/* Placeholder - get hci_chan for logical link */
5196 
5197 	if (hchan) {
5198 		if (hchan->state == BT_CONNECTED) {
5199 			/* Logical link is ready to go */
5200 			chan->hs_hcon = hchan->conn;
5201 			chan->hs_hcon->l2cap_data = chan->conn;
5202 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5203 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5204 
5205 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5206 		} else {
5207 			/* Wait for logical link to be ready */
5208 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5209 		}
5210 	} else {
5211 		/* Logical link not available */
5212 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5213 	}
5214 }
5215 
5216 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5217 {
5218 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5219 		u8 rsp_result;
5220 		if (result == -EINVAL)
5221 			rsp_result = L2CAP_MR_BAD_ID;
5222 		else
5223 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5224 
5225 		l2cap_send_move_chan_rsp(chan, rsp_result);
5226 	}
5227 
5228 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5229 	chan->move_state = L2CAP_MOVE_STABLE;
5230 
5231 	/* Restart data transmission */
5232 	l2cap_ertm_send(chan);
5233 }
5234 
5235 /* Invoke with locked chan */
5236 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5237 {
5238 	u8 local_amp_id = chan->local_amp_id;
5239 	u8 remote_amp_id = chan->remote_amp_id;
5240 
5241 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5242 	       chan, result, local_amp_id, remote_amp_id);
5243 
5244 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5245 		return;
5246 
5247 	if (chan->state != BT_CONNECTED) {
5248 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5249 	} else if (result != L2CAP_MR_SUCCESS) {
5250 		l2cap_do_move_cancel(chan, result);
5251 	} else {
5252 		switch (chan->move_role) {
5253 		case L2CAP_MOVE_ROLE_INITIATOR:
5254 			l2cap_do_move_initiate(chan, local_amp_id,
5255 					       remote_amp_id);
5256 			break;
5257 		case L2CAP_MOVE_ROLE_RESPONDER:
5258 			l2cap_do_move_respond(chan, result);
5259 			break;
5260 		default:
5261 			l2cap_do_move_cancel(chan, result);
5262 			break;
5263 		}
5264 	}
5265 }
5266 
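/* Handle an AMP Move Channel Request: verify that the channel is allowed
 * to move, detect move collisions (the side with the larger bd_addr wins)
 * and reply with success, pending or a refusal.
 */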
5267 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5268 					 struct l2cap_cmd_hdr *cmd,
5269 					 u16 cmd_len, void *data)
5270 {
5271 	struct l2cap_move_chan_req *req = data;
5272 	struct l2cap_move_chan_rsp rsp;
5273 	struct l2cap_chan *chan;
5274 	u16 icid = 0;
5275 	u16 result = L2CAP_MR_NOT_ALLOWED;
5276 
5277 	if (cmd_len != sizeof(*req))
5278 		return -EPROTO;
5279 
5280 	icid = le16_to_cpu(req->icid);
5281 
5282 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5283 
5284 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5285 		return -EINVAL;
5286 
5287 	chan = l2cap_get_chan_by_dcid(conn, icid);
5288 	if (!chan) {
5289 		rsp.icid = cpu_to_le16(icid);
5290 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5291 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5292 			       sizeof(rsp), &rsp);
5293 		return 0;
5294 	}
5295 
5296 	chan->ident = cmd->ident;
5297 
5298 	if (chan->scid < L2CAP_CID_DYN_START ||
5299 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5300 	    (chan->mode != L2CAP_MODE_ERTM &&
5301 	     chan->mode != L2CAP_MODE_STREAMING)) {
5302 		result = L2CAP_MR_NOT_ALLOWED;
5303 		goto send_move_response;
5304 	}
5305 
5306 	if (chan->local_amp_id == req->dest_amp_id) {
5307 		result = L2CAP_MR_SAME_ID;
5308 		goto send_move_response;
5309 	}
5310 
5311 	if (req->dest_amp_id != AMP_ID_BREDR) {
5312 		struct hci_dev *hdev;
5313 		hdev = hci_dev_get(req->dest_amp_id);
5314 		if (!hdev || hdev->dev_type != HCI_AMP ||
5315 		    !test_bit(HCI_UP, &hdev->flags)) {
5316 			if (hdev)
5317 				hci_dev_put(hdev);
5318 
5319 			result = L2CAP_MR_BAD_ID;
5320 			goto send_move_response;
5321 		}
5322 		hci_dev_put(hdev);
5323 	}
5324 
5325 	/* Detect a move collision.  Only send a collision response
5326 	 * if this side has "lost", otherwise proceed with the move.
5327 	 * The winner has the larger bd_addr.
5328 	 */
5329 	if ((__chan_is_moving(chan) ||
5330 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5331 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5332 		result = L2CAP_MR_COLLISION;
5333 		goto send_move_response;
5334 	}
5335 
5336 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5337 	l2cap_move_setup(chan);
5338 	chan->move_id = req->dest_amp_id;
5339 
5340 	if (req->dest_amp_id == AMP_ID_BREDR) {
5341 		/* Moving to BR/EDR */
5342 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5343 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5344 			result = L2CAP_MR_PEND;
5345 		} else {
5346 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5347 			result = L2CAP_MR_SUCCESS;
5348 		}
5349 	} else {
5350 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5351 		/* Placeholder - uncomment when amp functions are available */
5352 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5353 		result = L2CAP_MR_PEND;
5354 	}
5355 
5356 send_move_response:
5357 	l2cap_send_move_chan_rsp(chan, result);
5358 
5359 	l2cap_chan_unlock(chan);
5360 	l2cap_chan_put(chan);
5361 
5362 	return 0;
5363 }
5364 
5365 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5366 {
5367 	struct l2cap_chan *chan;
5368 	struct hci_chan *hchan = NULL;
5369 
5370 	chan = l2cap_get_chan_by_scid(conn, icid);
5371 	if (!chan) {
5372 		l2cap_send_move_chan_cfm_icid(conn, icid);
5373 		return;
5374 	}
5375 
5376 	__clear_chan_timer(chan);
5377 	if (result == L2CAP_MR_PEND)
5378 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5379 
5380 	switch (chan->move_state) {
5381 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5382 		/* Move confirm will be sent when logical link
5383 		 * is complete.
5384 		 */
5385 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5386 		break;
5387 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5388 		if (result == L2CAP_MR_PEND) {
5389 			break;
5390 		} else if (test_bit(CONN_LOCAL_BUSY,
5391 				    &chan->conn_state)) {
5392 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5393 		} else {
5394 			/* Logical link is up or moving to BR/EDR,
5395 			 * proceed with move
5396 			 */
5397 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5398 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5399 		}
5400 		break;
5401 	case L2CAP_MOVE_WAIT_RSP:
5402 		/* Moving to AMP */
5403 		if (result == L2CAP_MR_SUCCESS) {
5404 			/* Remote is ready, send confirm immediately
5405 			 * after logical link is ready
5406 			 */
5407 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5408 		} else {
5409 			/* Both logical link and move success
5410 			 * are required to confirm
5411 			 */
5412 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5413 		}
5414 
5415 		/* Placeholder - get hci_chan for logical link */
5416 		if (!hchan) {
5417 			/* Logical link not available */
5418 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5419 			break;
5420 		}
5421 
5422 		/* If the logical link is not yet connected, do not
5423 		 * send confirmation.
5424 		 */
5425 		if (hchan->state != BT_CONNECTED)
5426 			break;
5427 
5428 		/* Logical link is already ready to go */
5429 
5430 		chan->hs_hcon = hchan->conn;
5431 		chan->hs_hcon->l2cap_data = chan->conn;
5432 
5433 		if (result == L2CAP_MR_SUCCESS) {
5434 			/* Can confirm now */
5435 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5436 		} else {
5437 			/* Now only need move success
5438 			 * to confirm
5439 			 */
5440 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5441 		}
5442 
5443 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5444 		break;
5445 	default:
5446 		/* Any other amp move state means the move failed. */
5447 		chan->move_id = chan->local_amp_id;
5448 		l2cap_move_done(chan);
5449 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5450 	}
5451 
5452 	l2cap_chan_unlock(chan);
5453 	l2cap_chan_put(chan);
5454 }
5455 
5456 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5457 			    u16 result)
5458 {
5459 	struct l2cap_chan *chan;
5460 
5461 	chan = l2cap_get_chan_by_ident(conn, ident);
5462 	if (!chan) {
5463 		/* Could not locate channel, icid is best guess */
5464 		l2cap_send_move_chan_cfm_icid(conn, icid);
5465 		return;
5466 	}
5467 
5468 	__clear_chan_timer(chan);
5469 
5470 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5471 		if (result == L2CAP_MR_COLLISION) {
5472 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5473 		} else {
5474 			/* Cleanup - cancel move */
5475 			chan->move_id = chan->local_amp_id;
5476 			l2cap_move_done(chan);
5477 		}
5478 	}
5479 
5480 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5481 
5482 	l2cap_chan_unlock(chan);
5483 	l2cap_chan_put(chan);
5484 }
5485 
5486 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5487 				  struct l2cap_cmd_hdr *cmd,
5488 				  u16 cmd_len, void *data)
5489 {
5490 	struct l2cap_move_chan_rsp *rsp = data;
5491 	u16 icid, result;
5492 
5493 	if (cmd_len != sizeof(*rsp))
5494 		return -EPROTO;
5495 
5496 	icid = le16_to_cpu(rsp->icid);
5497 	result = le16_to_cpu(rsp->result);
5498 
5499 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5500 
5501 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5502 		l2cap_move_continue(conn, icid, result);
5503 	else
5504 		l2cap_move_fail(conn, cmd->ident, icid, result);
5505 
5506 	return 0;
5507 }
5508 
5509 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5510 				      struct l2cap_cmd_hdr *cmd,
5511 				      u16 cmd_len, void *data)
5512 {
5513 	struct l2cap_move_chan_cfm *cfm = data;
5514 	struct l2cap_chan *chan;
5515 	u16 icid, result;
5516 
5517 	if (cmd_len != sizeof(*cfm))
5518 		return -EPROTO;
5519 
5520 	icid = le16_to_cpu(cfm->icid);
5521 	result = le16_to_cpu(cfm->result);
5522 
5523 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5524 
5525 	chan = l2cap_get_chan_by_dcid(conn, icid);
5526 	if (!chan) {
5527 		/* Spec requires a response even if the icid was not found */
5528 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5529 		return 0;
5530 	}
5531 
5532 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5533 		if (result == L2CAP_MC_CONFIRMED) {
5534 			chan->local_amp_id = chan->move_id;
5535 			if (chan->local_amp_id == AMP_ID_BREDR)
5536 				__release_logical_link(chan);
5537 		} else {
5538 			chan->move_id = chan->local_amp_id;
5539 		}
5540 
5541 		l2cap_move_done(chan);
5542 	}
5543 
5544 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5545 
5546 	l2cap_chan_unlock(chan);
5547 	l2cap_chan_put(chan);
5548 
5549 	return 0;
5550 }
5551 
5552 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5553 						 struct l2cap_cmd_hdr *cmd,
5554 						 u16 cmd_len, void *data)
5555 {
5556 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5557 	struct l2cap_chan *chan;
5558 	u16 icid;
5559 
5560 	if (cmd_len != sizeof(*rsp))
5561 		return -EPROTO;
5562 
5563 	icid = le16_to_cpu(rsp->icid);
5564 
5565 	BT_DBG("icid 0x%4.4x", icid);
5566 
5567 	chan = l2cap_get_chan_by_scid(conn, icid);
5568 	if (!chan)
5569 		return 0;
5570 
5571 	__clear_chan_timer(chan);
5572 
5573 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5574 		chan->local_amp_id = chan->move_id;
5575 
5576 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5577 			__release_logical_link(chan);
5578 
5579 		l2cap_move_done(chan);
5580 	}
5581 
5582 	l2cap_chan_unlock(chan);
5583 	l2cap_chan_put(chan);
5584 
5585 	return 0;
5586 }
5587 
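/* Handle an LE Connection Parameter Update Request.  Only the central
 * (HCI_ROLE_MASTER) side may apply new parameters; the request is
 * validated with hci_check_conn_params() and answered with either
 * L2CAP_CONN_PARAM_ACCEPTED or L2CAP_CONN_PARAM_REJECTED.  Accepted
 * values are pushed to the controller via hci_le_conn_update() and
 * reported through mgmt_new_conn_param().
 */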
5588 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5589 					      struct l2cap_cmd_hdr *cmd,
5590 					      u16 cmd_len, u8 *data)
5591 {
5592 	struct hci_conn *hcon = conn->hcon;
5593 	struct l2cap_conn_param_update_req *req;
5594 	struct l2cap_conn_param_update_rsp rsp;
5595 	u16 min, max, latency, to_multiplier;
5596 	int err;
5597 
5598 	if (hcon->role != HCI_ROLE_MASTER)
5599 		return -EINVAL;
5600 
5601 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5602 		return -EPROTO;
5603 
5604 	req = (struct l2cap_conn_param_update_req *) data;
5605 	min		= __le16_to_cpu(req->min);
5606 	max		= __le16_to_cpu(req->max);
5607 	latency		= __le16_to_cpu(req->latency);
5608 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5609 
5610 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5611 	       min, max, latency, to_multiplier);
5612 
5613 	memset(&rsp, 0, sizeof(rsp));
5614 
5615 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5616 	if (err)
5617 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5618 	else
5619 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5620 
5621 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5622 		       sizeof(rsp), &rsp);
5623 
5624 	if (!err) {
5625 		u8 store_hint;
5626 
5627 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5628 						to_multiplier);
5629 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5630 				    store_hint, min, max, latency,
5631 				    to_multiplier);
5632 
5633 	}
5634 
5635 	return 0;
5636 }
5637 
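/* Handle the response to an LE Credit Based Connection Request.  The
 * pending channel is looked up by cmd->ident; on success it receives
 * its dcid, peer MTU/MPS and initial TX credits and becomes ready.
 * L2CAP_CR_LE_AUTHENTICATION/ENCRYPTION raise the security level and
 * let SMP run so a new connect request can be sent later; any other
 * result deletes the channel with ECONNREFUSED.
 */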
5638 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5639 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5640 				u8 *data)
5641 {
5642 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5643 	struct hci_conn *hcon = conn->hcon;
5644 	u16 dcid, mtu, mps, credits, result;
5645 	struct l2cap_chan *chan;
5646 	int err, sec_level;
5647 
5648 	if (cmd_len < sizeof(*rsp))
5649 		return -EPROTO;
5650 
5651 	dcid    = __le16_to_cpu(rsp->dcid);
5652 	mtu     = __le16_to_cpu(rsp->mtu);
5653 	mps     = __le16_to_cpu(rsp->mps);
5654 	credits = __le16_to_cpu(rsp->credits);
5655 	result  = __le16_to_cpu(rsp->result);
5656 
5657 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5658 					   dcid < L2CAP_CID_DYN_START ||
5659 					   dcid > L2CAP_CID_LE_DYN_END))
5660 		return -EPROTO;
5661 
5662 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5663 	       dcid, mtu, mps, credits, result);
5664 
5665 	mutex_lock(&conn->chan_lock);
5666 
5667 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5668 	if (!chan) {
5669 		err = -EBADSLT;
5670 		goto unlock;
5671 	}
5672 
5673 	err = 0;
5674 
5675 	l2cap_chan_lock(chan);
5676 
5677 	switch (result) {
5678 	case L2CAP_CR_LE_SUCCESS:
5679 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5680 			err = -EBADSLT;
5681 			break;
5682 		}
5683 
5684 		chan->ident = 0;
5685 		chan->dcid = dcid;
5686 		chan->omtu = mtu;
5687 		chan->remote_mps = mps;
5688 		chan->tx_credits = credits;
5689 		l2cap_chan_ready(chan);
5690 		break;
5691 
5692 	case L2CAP_CR_LE_AUTHENTICATION:
5693 	case L2CAP_CR_LE_ENCRYPTION:
5694 		/* If we already have MITM protection we can't do
5695 		 * anything.
5696 		 */
5697 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5698 			l2cap_chan_del(chan, ECONNREFUSED);
5699 			break;
5700 		}
5701 
5702 		sec_level = hcon->sec_level + 1;
5703 		if (chan->sec_level < sec_level)
5704 			chan->sec_level = sec_level;
5705 
5706 		/* We'll need to send a new Connect Request */
5707 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5708 
5709 		smp_conn_security(hcon, chan->sec_level);
5710 		break;
5711 
5712 	default:
5713 		l2cap_chan_del(chan, ECONNREFUSED);
5714 		break;
5715 	}
5716 
5717 	l2cap_chan_unlock(chan);
5718 
5719 unlock:
5720 	mutex_unlock(&conn->chan_lock);
5721 
5722 	return err;
5723 }
5724 
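/* Dispatch a single BR/EDR signaling command to its handler.  A
 * non-zero return value makes the caller (l2cap_sig_channel) answer
 * the offending identifier with an L2CAP_COMMAND_REJ.
 */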
5725 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5726 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5727 				      u8 *data)
5728 {
5729 	int err = 0;
5730 
5731 	switch (cmd->code) {
5732 	case L2CAP_COMMAND_REJ:
5733 		l2cap_command_rej(conn, cmd, cmd_len, data);
5734 		break;
5735 
5736 	case L2CAP_CONN_REQ:
5737 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5738 		break;
5739 
5740 	case L2CAP_CONN_RSP:
5741 	case L2CAP_CREATE_CHAN_RSP:
5742 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5743 		break;
5744 
5745 	case L2CAP_CONF_REQ:
5746 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5747 		break;
5748 
5749 	case L2CAP_CONF_RSP:
5750 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5751 		break;
5752 
5753 	case L2CAP_DISCONN_REQ:
5754 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5755 		break;
5756 
5757 	case L2CAP_DISCONN_RSP:
5758 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5759 		break;
5760 
5761 	case L2CAP_ECHO_REQ:
5762 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5763 		break;
5764 
5765 	case L2CAP_ECHO_RSP:
5766 		break;
5767 
5768 	case L2CAP_INFO_REQ:
5769 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5770 		break;
5771 
5772 	case L2CAP_INFO_RSP:
5773 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5774 		break;
5775 
5776 	case L2CAP_CREATE_CHAN_REQ:
5777 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5778 		break;
5779 
5780 	case L2CAP_MOVE_CHAN_REQ:
5781 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5782 		break;
5783 
5784 	case L2CAP_MOVE_CHAN_RSP:
5785 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5786 		break;
5787 
5788 	case L2CAP_MOVE_CHAN_CFM:
5789 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5790 		break;
5791 
5792 	case L2CAP_MOVE_CHAN_CFM_RSP:
5793 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5794 		break;
5795 
5796 	default:
5797 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5798 		err = -EINVAL;
5799 		break;
5800 	}
5801 
5802 	return err;
5803 }
5804 
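/* Handle an incoming LE Credit Based Connection Request: enforce the
 * 23-octet MTU/MPS minimum and the SPSM range, find a listening
 * channel, check security and the dynamic SCID range, then create the
 * new channel and seed LE flow control with the peer's initial
 * credits.  With FLAG_DEFER_SETUP the channel goes to BT_CONNECT2 and
 * no response is sent yet (L2CAP_CR_PEND marks that case internally).
 */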
5805 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5806 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5807 				u8 *data)
5808 {
5809 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5810 	struct l2cap_le_conn_rsp rsp;
5811 	struct l2cap_chan *chan, *pchan;
5812 	u16 dcid, scid, credits, mtu, mps;
5813 	__le16 psm;
5814 	u8 result;
5815 
5816 	if (cmd_len != sizeof(*req))
5817 		return -EPROTO;
5818 
5819 	scid = __le16_to_cpu(req->scid);
5820 	mtu  = __le16_to_cpu(req->mtu);
5821 	mps  = __le16_to_cpu(req->mps);
5822 	psm  = req->psm;
5823 	dcid = 0;
5824 	credits = 0;
5825 
5826 	if (mtu < 23 || mps < 23)
5827 		return -EPROTO;
5828 
5829 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5830 	       scid, mtu, mps);
5831 
5832 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5833 	 * page 1059:
5834 	 *
5835 	 * Valid range: 0x0001-0x00ff
5836 	 *
5837 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5838 	 */
5839 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5840 		result = L2CAP_CR_LE_BAD_PSM;
5841 		chan = NULL;
5842 		goto response;
5843 	}
5844 
5845 	/* Check if we have a socket listening on this psm */
5846 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5847 					 &conn->hcon->dst, LE_LINK);
5848 	if (!pchan) {
5849 		result = L2CAP_CR_LE_BAD_PSM;
5850 		chan = NULL;
5851 		goto response;
5852 	}
5853 
5854 	mutex_lock(&conn->chan_lock);
5855 	l2cap_chan_lock(pchan);
5856 
5857 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5858 				     SMP_ALLOW_STK)) {
5859 		result = L2CAP_CR_LE_AUTHENTICATION;
5860 		chan = NULL;
5861 		goto response_unlock;
5862 	}
5863 
5864 	/* Check for valid dynamic CID range */
5865 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5866 		result = L2CAP_CR_LE_INVALID_SCID;
5867 		chan = NULL;
5868 		goto response_unlock;
5869 	}
5870 
5871 	/* Check if we already have a channel with that dcid */
5872 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5873 		result = L2CAP_CR_LE_SCID_IN_USE;
5874 		chan = NULL;
5875 		goto response_unlock;
5876 	}
5877 
5878 	chan = pchan->ops->new_connection(pchan);
5879 	if (!chan) {
5880 		result = L2CAP_CR_LE_NO_MEM;
5881 		goto response_unlock;
5882 	}
5883 
5884 	bacpy(&chan->src, &conn->hcon->src);
5885 	bacpy(&chan->dst, &conn->hcon->dst);
5886 	chan->src_type = bdaddr_src_type(conn->hcon);
5887 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5888 	chan->psm  = psm;
5889 	chan->dcid = scid;
5890 	chan->omtu = mtu;
5891 	chan->remote_mps = mps;
5892 
5893 	__l2cap_chan_add(conn, chan);
5894 
5895 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5896 
5897 	dcid = chan->scid;
5898 	credits = chan->rx_credits;
5899 
5900 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5901 
5902 	chan->ident = cmd->ident;
5903 
5904 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5905 		l2cap_state_change(chan, BT_CONNECT2);
5906 		/* The following result value is actually not defined
5907 		 * for LE CoC but we use it to let the function know
5908 		 * that it should bail out after doing its cleanup
5909 		 * instead of sending a response.
5910 		 */
5911 		result = L2CAP_CR_PEND;
5912 		chan->ops->defer(chan);
5913 	} else {
5914 		l2cap_chan_ready(chan);
5915 		result = L2CAP_CR_LE_SUCCESS;
5916 	}
5917 
5918 response_unlock:
5919 	l2cap_chan_unlock(pchan);
5920 	mutex_unlock(&conn->chan_lock);
5921 	l2cap_chan_put(pchan);
5922 
5923 	if (result == L2CAP_CR_PEND)
5924 		return 0;
5925 
5926 response:
5927 	if (chan) {
5928 		rsp.mtu = cpu_to_le16(chan->imtu);
5929 		rsp.mps = cpu_to_le16(chan->mps);
5930 	} else {
5931 		rsp.mtu = 0;
5932 		rsp.mps = 0;
5933 	}
5934 
5935 	rsp.dcid    = cpu_to_le16(dcid);
5936 	rsp.credits = cpu_to_le16(credits);
5937 	rsp.result  = cpu_to_le16(result);
5938 
5939 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5940 
5941 	return 0;
5942 }
5943 
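/* Handle an L2CAP_LE_CREDITS packet: add the credits to
 * chan->tx_credits, disconnect the channel if the total would exceed
 * LE_FLOWCTL_MAX_CREDITS, then flush queued SDUs with
 * l2cap_le_flowctl_send() and resume the owner while credits remain.
 */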
5944 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5945 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5946 				   u8 *data)
5947 {
5948 	struct l2cap_le_credits *pkt;
5949 	struct l2cap_chan *chan;
5950 	u16 cid, credits, max_credits;
5951 
5952 	if (cmd_len != sizeof(*pkt))
5953 		return -EPROTO;
5954 
5955 	pkt = (struct l2cap_le_credits *) data;
5956 	cid	= __le16_to_cpu(pkt->cid);
5957 	credits	= __le16_to_cpu(pkt->credits);
5958 
5959 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5960 
5961 	chan = l2cap_get_chan_by_dcid(conn, cid);
5962 	if (!chan)
5963 		return -EBADSLT;
5964 
5965 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5966 	if (credits > max_credits) {
5967 		BT_ERR("LE credits overflow");
5968 		l2cap_send_disconn_req(chan, ECONNRESET);
5969 
5970 		/* Return 0 so that we don't trigger an unnecessary
5971 		 * command reject packet.
5972 		 */
5973 		goto unlock;
5974 	}
5975 
5976 	chan->tx_credits += credits;
5977 
5978 	/* Resume sending */
5979 	l2cap_le_flowctl_send(chan);
5980 
5981 	if (chan->tx_credits)
5982 		chan->ops->resume(chan);
5983 
5984 unlock:
5985 	l2cap_chan_unlock(chan);
5986 	l2cap_chan_put(chan);
5987 
5988 	return 0;
5989 }
5990 
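/* Handle an Enhanced Credit Based (ECRED) connection request, which
 * can open up to L2CAP_ECRED_MAX_CID channels at once.  Each source
 * CID is validated individually; a refused CID is reported back as a
 * 0x0000 dcid, while the MTU/MPS/credits of the first accepted channel
 * and a single result code go into the one response PDU.  If any
 * channel defers setup, the response is postponed entirely.
 */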
5991 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5992 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5993 				       u8 *data)
5994 {
5995 	struct l2cap_ecred_conn_req *req = (void *) data;
5996 	struct {
5997 		struct l2cap_ecred_conn_rsp rsp;
5998 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5999 	} __packed pdu;
6000 	struct l2cap_chan *chan, *pchan;
6001 	u16 mtu, mps;
6002 	__le16 psm;
6003 	u8 result, len = 0;
6004 	int i, num_scid;
6005 	bool defer = false;
6006 
6007 	if (!enable_ecred)
6008 		return -EINVAL;
6009 
6010 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6011 		result = L2CAP_CR_LE_INVALID_PARAMS;
6012 		goto response;
6013 	}
6014 
6015 	cmd_len -= sizeof(*req);
6016 	num_scid = cmd_len / sizeof(u16);
6017 
6018 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6019 		result = L2CAP_CR_LE_INVALID_PARAMS;
6020 		goto response;
6021 	}
6022 
6023 	mtu  = __le16_to_cpu(req->mtu);
6024 	mps  = __le16_to_cpu(req->mps);
6025 
6026 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6027 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6028 		goto response;
6029 	}
6030 
6031 	psm  = req->psm;
6032 
6033 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6034 	 * page 1059:
6035 	 *
6036 	 * Valid range: 0x0001-0x00ff
6037 	 *
6038 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6039 	 */
6040 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6041 		result = L2CAP_CR_LE_BAD_PSM;
6042 		goto response;
6043 	}
6044 
6045 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6046 
6047 	memset(&pdu, 0, sizeof(pdu));
6048 
6049 	/* Check if we have a socket listening on this psm */
6050 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6051 					 &conn->hcon->dst, LE_LINK);
6052 	if (!pchan) {
6053 		result = L2CAP_CR_LE_BAD_PSM;
6054 		goto response;
6055 	}
6056 
6057 	mutex_lock(&conn->chan_lock);
6058 	l2cap_chan_lock(pchan);
6059 
6060 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6061 				     SMP_ALLOW_STK)) {
6062 		result = L2CAP_CR_LE_AUTHENTICATION;
6063 		goto unlock;
6064 	}
6065 
6066 	result = L2CAP_CR_LE_SUCCESS;
6067 
6068 	for (i = 0; i < num_scid; i++) {
6069 		u16 scid = __le16_to_cpu(req->scid[i]);
6070 
6071 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6072 
6073 		pdu.dcid[i] = 0x0000;
6074 		len += sizeof(*pdu.dcid);
6075 
6076 		/* Check for valid dynamic CID range */
6077 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6078 			result = L2CAP_CR_LE_INVALID_SCID;
6079 			continue;
6080 		}
6081 
6082 		/* Check if we already have a channel with that dcid */
6083 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6084 			result = L2CAP_CR_LE_SCID_IN_USE;
6085 			continue;
6086 		}
6087 
6088 		chan = pchan->ops->new_connection(pchan);
6089 		if (!chan) {
6090 			result = L2CAP_CR_LE_NO_MEM;
6091 			continue;
6092 		}
6093 
6094 		bacpy(&chan->src, &conn->hcon->src);
6095 		bacpy(&chan->dst, &conn->hcon->dst);
6096 		chan->src_type = bdaddr_src_type(conn->hcon);
6097 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6098 		chan->psm  = psm;
6099 		chan->dcid = scid;
6100 		chan->omtu = mtu;
6101 		chan->remote_mps = mps;
6102 
6103 		__l2cap_chan_add(conn, chan);
6104 
6105 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6106 
6107 		/* Init response */
6108 		if (!pdu.rsp.credits) {
6109 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6110 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6111 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6112 		}
6113 
6114 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6115 
6116 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6117 
6118 		chan->ident = cmd->ident;
6119 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6120 
6121 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6122 			l2cap_state_change(chan, BT_CONNECT2);
6123 			defer = true;
6124 			chan->ops->defer(chan);
6125 		} else {
6126 			l2cap_chan_ready(chan);
6127 		}
6128 	}
6129 
6130 unlock:
6131 	l2cap_chan_unlock(pchan);
6132 	mutex_unlock(&conn->chan_lock);
6133 	l2cap_chan_put(pchan);
6134 
6135 response:
6136 	pdu.rsp.result = cpu_to_le16(result);
6137 
6138 	if (defer)
6139 		return 0;
6140 
6141 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6142 		       sizeof(pdu.rsp) + len, &pdu);
6143 
6144 	return 0;
6145 }
6146 
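/* Handle an ECRED connection response.  Every channel still pending
 * under cmd->ident is matched against the returned dcid list: a
 * missing or already-used dcid deletes the channel, a security-related
 * result raises the security level and defers to SMP, and a valid dcid
 * brings the channel up with the advertised MTU/MPS/credits.
 */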
6147 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6148 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6149 				       u8 *data)
6150 {
6151 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6152 	struct hci_conn *hcon = conn->hcon;
6153 	u16 mtu, mps, credits, result;
6154 	struct l2cap_chan *chan, *tmp;
6155 	int err = 0, sec_level;
6156 	int i = 0;
6157 
6158 	if (cmd_len < sizeof(*rsp))
6159 		return -EPROTO;
6160 
6161 	mtu     = __le16_to_cpu(rsp->mtu);
6162 	mps     = __le16_to_cpu(rsp->mps);
6163 	credits = __le16_to_cpu(rsp->credits);
6164 	result  = __le16_to_cpu(rsp->result);
6165 
6166 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6167 	       result);
6168 
6169 	mutex_lock(&conn->chan_lock);
6170 
6171 	cmd_len -= sizeof(*rsp);
6172 
6173 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6174 		u16 dcid;
6175 
6176 		if (chan->ident != cmd->ident ||
6177 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6178 		    chan->state == BT_CONNECTED)
6179 			continue;
6180 
6181 		l2cap_chan_lock(chan);
6182 
6183 		/* Check that there is a dcid for each pending channel */
6184 		if (cmd_len < sizeof(dcid)) {
6185 			l2cap_chan_del(chan, ECONNREFUSED);
6186 			l2cap_chan_unlock(chan);
6187 			continue;
6188 		}
6189 
6190 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6191 		cmd_len -= sizeof(u16);
6192 
6193 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6194 
6195 		/* Check if dcid is already in use */
6196 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6197 			/* If a device receives a
6198 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6199 			 * already-assigned Destination CID, then both the
6200 			 * original channel and the new channel shall be
6201 			 * immediately discarded and not used.
6202 			 */
6203 			l2cap_chan_del(chan, ECONNREFUSED);
6204 			l2cap_chan_unlock(chan);
6205 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6206 			l2cap_chan_lock(chan);
6207 			l2cap_chan_del(chan, ECONNRESET);
6208 			l2cap_chan_unlock(chan);
6209 			continue;
6210 		}
6211 
6212 		switch (result) {
6213 		case L2CAP_CR_LE_AUTHENTICATION:
6214 		case L2CAP_CR_LE_ENCRYPTION:
6215 			/* If we already have MITM protection we can't do
6216 			 * anything.
6217 			 */
6218 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6219 				l2cap_chan_del(chan, ECONNREFUSED);
6220 				break;
6221 			}
6222 
6223 			sec_level = hcon->sec_level + 1;
6224 			if (chan->sec_level < sec_level)
6225 				chan->sec_level = sec_level;
6226 
6227 			/* We'll need to send a new Connect Request */
6228 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6229 
6230 			smp_conn_security(hcon, chan->sec_level);
6231 			break;
6232 
6233 		case L2CAP_CR_LE_BAD_PSM:
6234 			l2cap_chan_del(chan, ECONNREFUSED);
6235 			break;
6236 
6237 		default:
6238 			/* If dcid was not set it means the channel was refused */
6239 			if (!dcid) {
6240 				l2cap_chan_del(chan, ECONNREFUSED);
6241 				break;
6242 			}
6243 
6244 			chan->ident = 0;
6245 			chan->dcid = dcid;
6246 			chan->omtu = mtu;
6247 			chan->remote_mps = mps;
6248 			chan->tx_credits = credits;
6249 			l2cap_chan_ready(chan);
6250 			break;
6251 		}
6252 
6253 		l2cap_chan_unlock(chan);
6254 	}
6255 
6256 	mutex_unlock(&conn->chan_lock);
6257 
6258 	return err;
6259 }
6260 
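/* Handle an ECRED reconfiguration request.  The new MTU and MPS are
 * checked against the ECRED minimums and applied to every channel
 * listed in the request; an attempt to shrink a channel's MTU is
 * reported as L2CAP_RECONF_INVALID_MTU in the response.
 */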
6261 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6262 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6263 					 u8 *data)
6264 {
6265 	struct l2cap_ecred_reconf_req *req = (void *) data;
6266 	struct l2cap_ecred_reconf_rsp rsp;
6267 	u16 mtu, mps, result;
6268 	struct l2cap_chan *chan;
6269 	int i, num_scid;
6270 
6271 	if (!enable_ecred)
6272 		return -EINVAL;
6273 
6274 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6275 		result = L2CAP_CR_LE_INVALID_PARAMS;
6276 		goto respond;
6277 	}
6278 
6279 	mtu = __le16_to_cpu(req->mtu);
6280 	mps = __le16_to_cpu(req->mps);
6281 
6282 	BT_DBG("mtu %u mps %u", mtu, mps);
6283 
6284 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6285 		result = L2CAP_RECONF_INVALID_MTU;
6286 		goto respond;
6287 	}
6288 
6289 	if (mps < L2CAP_ECRED_MIN_MPS) {
6290 		result = L2CAP_RECONF_INVALID_MPS;
6291 		goto respond;
6292 	}
6293 
6294 	cmd_len -= sizeof(*req);
6295 	num_scid = cmd_len / sizeof(u16);
6296 	result = L2CAP_RECONF_SUCCESS;
6297 
6298 	for (i = 0; i < num_scid; i++) {
6299 		u16 scid;
6300 
6301 		scid = __le16_to_cpu(req->scid[i]);
6302 		if (!scid)
6303 			return -EPROTO;
6304 
6305 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6306 		if (!chan)
6307 			continue;
6308 
6309 		/* If the MTU value is decreased for any of the included
6310 		 * channels, then the receiver shall disconnect all
6311 		 * included channels.
6312 		 */
6313 		if (chan->omtu > mtu) {
6314 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6315 			       chan->omtu, mtu);
6316 			result = L2CAP_RECONF_INVALID_MTU;
6317 		}
6318 
6319 		chan->omtu = mtu;
6320 		chan->remote_mps = mps;
6321 	}
6322 
6323 respond:
6324 	rsp.result = cpu_to_le16(result);
6325 
6326 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6327 		       &rsp);
6328 
6329 	return 0;
6330 }
6331 
6332 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6333 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6334 					 u8 *data)
6335 {
6336 	struct l2cap_chan *chan, *tmp;
6337 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6338 	u16 result;
6339 
6340 	if (cmd_len < sizeof(*rsp))
6341 		return -EPROTO;
6342 
6343 	result = __le16_to_cpu(rsp->result);
6344 
6345 	BT_DBG("result 0x%4.4x", result);
6346 
6347 	if (!result)
6348 		return 0;
6349 
6350 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6351 		if (chan->ident != cmd->ident)
6352 			continue;
6353 
6354 		l2cap_chan_del(chan, ECONNRESET);
6355 	}
6356 
6357 	return 0;
6358 }
6359 
6360 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6361 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6362 				       u8 *data)
6363 {
6364 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6365 	struct l2cap_chan *chan;
6366 
6367 	if (cmd_len < sizeof(*rej))
6368 		return -EPROTO;
6369 
6370 	mutex_lock(&conn->chan_lock);
6371 
6372 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6373 	if (!chan)
6374 		goto done;
6375 
6376 	chan = l2cap_chan_hold_unless_zero(chan);
6377 	if (!chan)
6378 		goto done;
6379 
6380 	l2cap_chan_lock(chan);
6381 	l2cap_chan_del(chan, ECONNREFUSED);
6382 	l2cap_chan_unlock(chan);
6383 	l2cap_chan_put(chan);
6384 
6385 done:
6386 	mutex_unlock(&conn->chan_lock);
6387 	return 0;
6388 }
6389 
6390 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6391 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6392 				   u8 *data)
6393 {
6394 	int err = 0;
6395 
6396 	switch (cmd->code) {
6397 	case L2CAP_COMMAND_REJ:
6398 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6399 		break;
6400 
6401 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6402 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6403 		break;
6404 
6405 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6406 		break;
6407 
6408 	case L2CAP_LE_CONN_RSP:
6409 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6410 		break;
6411 
6412 	case L2CAP_LE_CONN_REQ:
6413 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6414 		break;
6415 
6416 	case L2CAP_LE_CREDITS:
6417 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6418 		break;
6419 
6420 	case L2CAP_ECRED_CONN_REQ:
6421 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6422 		break;
6423 
6424 	case L2CAP_ECRED_CONN_RSP:
6425 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6426 		break;
6427 
6428 	case L2CAP_ECRED_RECONF_REQ:
6429 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6430 		break;
6431 
6432 	case L2CAP_ECRED_RECONF_RSP:
6433 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6434 		break;
6435 
6436 	case L2CAP_DISCONN_REQ:
6437 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6438 		break;
6439 
6440 	case L2CAP_DISCONN_RSP:
6441 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6442 		break;
6443 
6444 	default:
6445 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6446 		err = -EINVAL;
6447 		break;
6448 	}
6449 
6450 	return err;
6451 }
6452 
6453 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6454 					struct sk_buff *skb)
6455 {
6456 	struct hci_conn *hcon = conn->hcon;
6457 	struct l2cap_cmd_hdr *cmd;
6458 	u16 len;
6459 	int err;
6460 
6461 	if (hcon->type != LE_LINK)
6462 		goto drop;
6463 
6464 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6465 		goto drop;
6466 
6467 	cmd = (void *) skb->data;
6468 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6469 
6470 	len = le16_to_cpu(cmd->len);
6471 
6472 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6473 
6474 	if (len != skb->len || !cmd->ident) {
6475 		BT_DBG("corrupted command");
6476 		goto drop;
6477 	}
6478 
6479 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6480 	if (err) {
6481 		struct l2cap_cmd_rej_unk rej;
6482 
6483 		BT_ERR("Wrong link type (%d)", err);
6484 
6485 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6486 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6487 			       sizeof(rej), &rej);
6488 	}
6489 
6490 drop:
6491 	kfree_skb(skb);
6492 }
6493 
6494 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6495 {
6496 	struct l2cap_cmd_rej_unk rej;
6497 
6498 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6499 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
6500 }
6501 
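/* Parse the BR/EDR signaling channel.  Unlike the LE case, one C-frame
 * may carry several commands, so the skb is walked one command header
 * at a time; a malformed length or zero identifier triggers an
 * L2CAP_COMMAND_REJ and stops parsing.
 */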
6502 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6503 				     struct sk_buff *skb)
6504 {
6505 	struct hci_conn *hcon = conn->hcon;
6506 	struct l2cap_cmd_hdr *cmd;
6507 	int err;
6508 
6509 	l2cap_raw_recv(conn, skb);
6510 
6511 	if (hcon->type != ACL_LINK)
6512 		goto drop;
6513 
6514 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6515 		u16 len;
6516 
6517 		cmd = (void *) skb->data;
6518 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6519 
6520 		len = le16_to_cpu(cmd->len);
6521 
6522 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6523 		       cmd->ident);
6524 
6525 		if (len > skb->len || !cmd->ident) {
6526 			BT_DBG("corrupted command");
6527 			l2cap_sig_send_rej(conn, cmd->ident);
6528 			break;
6529 		}
6530 
6531 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6532 		if (err) {
6533 			BT_ERR("Wrong link type (%d)", err);
6534 			l2cap_sig_send_rej(conn, cmd->ident);
6535 		}
6536 
6537 		skb_pull(skb, len);
6538 	}
6539 
6540 	if (skb->len > 0) {
6541 		BT_DBG("corrupted command");
6542 		l2cap_sig_send_rej(conn, 0);
6543 	}
6544 
6545 drop:
6546 	kfree_skb(skb);
6547 }
6548 
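/* Verify and strip the per-frame FCS.  For L2CAP_FCS_CRC16 the
 * trailing two octets are trimmed from the skb and compared with a
 * CRC-16 computed over the L2CAP header (standard or extended control
 * field) plus the payload.
 */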
6549 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6550 {
6551 	u16 our_fcs, rcv_fcs;
6552 	int hdr_size;
6553 
6554 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6555 		hdr_size = L2CAP_EXT_HDR_SIZE;
6556 	else
6557 		hdr_size = L2CAP_ENH_HDR_SIZE;
6558 
6559 	if (chan->fcs == L2CAP_FCS_CRC16) {
6560 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6561 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6562 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6563 
6564 		if (our_fcs != rcv_fcs)
6565 			return -EBADMSG;
6566 	}
6567 	return 0;
6568 }
6569 
6570 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6571 {
6572 	struct l2cap_ctrl control;
6573 
6574 	BT_DBG("chan %p", chan);
6575 
6576 	memset(&control, 0, sizeof(control));
6577 	control.sframe = 1;
6578 	control.final = 1;
6579 	control.reqseq = chan->buffer_seq;
6580 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6581 
6582 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6583 		control.super = L2CAP_SUPER_RNR;
6584 		l2cap_send_sframe(chan, &control);
6585 	}
6586 
6587 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6588 	    chan->unacked_frames > 0)
6589 		__set_retrans_timer(chan);
6590 
6591 	/* Send pending iframes */
6592 	l2cap_ertm_send(chan);
6593 
6594 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6595 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6596 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6597 		 * send it now.
6598 		 */
6599 		control.super = L2CAP_SUPER_RR;
6600 		l2cap_send_sframe(chan, &control);
6601 	}
6602 }
6603 
6604 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6605 			    struct sk_buff **last_frag)
6606 {
6607 	/* skb->len reflects data in skb as well as all fragments
6608 	 * skb->data_len reflects only data in fragments
6609 	 */
6610 	if (!skb_has_frag_list(skb))
6611 		skb_shinfo(skb)->frag_list = new_frag;
6612 
6613 	new_frag->next = NULL;
6614 
6615 	(*last_frag)->next = new_frag;
6616 	*last_frag = new_frag;
6617 
6618 	skb->len += new_frag->len;
6619 	skb->data_len += new_frag->len;
6620 	skb->truesize += new_frag->truesize;
6621 }
6622 
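/* Reassemble an SDU from its SAR fragments.  Unsegmented frames are
 * delivered directly; a START frame carries the SDU length and opens
 * chan->sdu, CONTINUE and END frames are chained on with
 * append_skb_frag(), and a completed SDU is handed to
 * chan->ops->recv().  An out-of-place SAR value or an oversized SDU
 * returns an error and drops any partial SDU.
 */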
6623 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6624 				struct l2cap_ctrl *control)
6625 {
6626 	int err = -EINVAL;
6627 
6628 	switch (control->sar) {
6629 	case L2CAP_SAR_UNSEGMENTED:
6630 		if (chan->sdu)
6631 			break;
6632 
6633 		err = chan->ops->recv(chan, skb);
6634 		break;
6635 
6636 	case L2CAP_SAR_START:
6637 		if (chan->sdu)
6638 			break;
6639 
6640 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6641 			break;
6642 
6643 		chan->sdu_len = get_unaligned_le16(skb->data);
6644 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6645 
6646 		if (chan->sdu_len > chan->imtu) {
6647 			err = -EMSGSIZE;
6648 			break;
6649 		}
6650 
6651 		if (skb->len >= chan->sdu_len)
6652 			break;
6653 
6654 		chan->sdu = skb;
6655 		chan->sdu_last_frag = skb;
6656 
6657 		skb = NULL;
6658 		err = 0;
6659 		break;
6660 
6661 	case L2CAP_SAR_CONTINUE:
6662 		if (!chan->sdu)
6663 			break;
6664 
6665 		append_skb_frag(chan->sdu, skb,
6666 				&chan->sdu_last_frag);
6667 		skb = NULL;
6668 
6669 		if (chan->sdu->len >= chan->sdu_len)
6670 			break;
6671 
6672 		err = 0;
6673 		break;
6674 
6675 	case L2CAP_SAR_END:
6676 		if (!chan->sdu)
6677 			break;
6678 
6679 		append_skb_frag(chan->sdu, skb,
6680 				&chan->sdu_last_frag);
6681 		skb = NULL;
6682 
6683 		if (chan->sdu->len != chan->sdu_len)
6684 			break;
6685 
6686 		err = chan->ops->recv(chan, chan->sdu);
6687 
6688 		if (!err) {
6689 			/* Reassembly complete */
6690 			chan->sdu = NULL;
6691 			chan->sdu_last_frag = NULL;
6692 			chan->sdu_len = 0;
6693 		}
6694 		break;
6695 	}
6696 
6697 	if (err) {
6698 		kfree_skb(skb);
6699 		kfree_skb(chan->sdu);
6700 		chan->sdu = NULL;
6701 		chan->sdu_last_frag = NULL;
6702 		chan->sdu_len = 0;
6703 	}
6704 
6705 	return err;
6706 }
6707 
6708 static int l2cap_resegment(struct l2cap_chan *chan)
6709 {
6710 	/* Placeholder */
6711 	return 0;
6712 }
6713 
6714 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6715 {
6716 	u8 event;
6717 
6718 	if (chan->mode != L2CAP_MODE_ERTM)
6719 		return;
6720 
6721 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6722 	l2cap_tx(chan, NULL, NULL, event);
6723 }
6724 
6725 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6726 {
6727 	int err = 0;
6728 	/* Pass sequential frames to l2cap_reassemble_sdu()
6729 	 * until a gap is encountered.
6730 	 */
6731 
6732 	BT_DBG("chan %p", chan);
6733 
6734 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6735 		struct sk_buff *skb;
6736 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6737 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6738 
6739 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6740 
6741 		if (!skb)
6742 			break;
6743 
6744 		skb_unlink(skb, &chan->srej_q);
6745 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6746 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6747 		if (err)
6748 			break;
6749 	}
6750 
6751 	if (skb_queue_empty(&chan->srej_q)) {
6752 		chan->rx_state = L2CAP_RX_STATE_RECV;
6753 		l2cap_send_ack(chan);
6754 	}
6755 
6756 	return err;
6757 }
6758 
6759 static void l2cap_handle_srej(struct l2cap_chan *chan,
6760 			      struct l2cap_ctrl *control)
6761 {
6762 	struct sk_buff *skb;
6763 
6764 	BT_DBG("chan %p, control %p", chan, control);
6765 
6766 	if (control->reqseq == chan->next_tx_seq) {
6767 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6768 		l2cap_send_disconn_req(chan, ECONNRESET);
6769 		return;
6770 	}
6771 
6772 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6773 
6774 	if (skb == NULL) {
6775 		BT_DBG("Seq %d not available for retransmission",
6776 		       control->reqseq);
6777 		return;
6778 	}
6779 
6780 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6781 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6782 		l2cap_send_disconn_req(chan, ECONNRESET);
6783 		return;
6784 	}
6785 
6786 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6787 
6788 	if (control->poll) {
6789 		l2cap_pass_to_tx(chan, control);
6790 
6791 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6792 		l2cap_retransmit(chan, control);
6793 		l2cap_ertm_send(chan);
6794 
6795 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6796 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6797 			chan->srej_save_reqseq = control->reqseq;
6798 		}
6799 	} else {
6800 		l2cap_pass_to_tx_fbit(chan, control);
6801 
6802 		if (control->final) {
6803 			if (chan->srej_save_reqseq != control->reqseq ||
6804 			    !test_and_clear_bit(CONN_SREJ_ACT,
6805 						&chan->conn_state))
6806 				l2cap_retransmit(chan, control);
6807 		} else {
6808 			l2cap_retransmit(chan, control);
6809 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6810 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6811 				chan->srej_save_reqseq = control->reqseq;
6812 			}
6813 		}
6814 	}
6815 }
6816 
6817 static void l2cap_handle_rej(struct l2cap_chan *chan,
6818 			     struct l2cap_ctrl *control)
6819 {
6820 	struct sk_buff *skb;
6821 
6822 	BT_DBG("chan %p, control %p", chan, control);
6823 
6824 	if (control->reqseq == chan->next_tx_seq) {
6825 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6826 		l2cap_send_disconn_req(chan, ECONNRESET);
6827 		return;
6828 	}
6829 
6830 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6831 
6832 	if (chan->max_tx && skb &&
6833 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6834 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6835 		l2cap_send_disconn_req(chan, ECONNRESET);
6836 		return;
6837 	}
6838 
6839 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6840 
6841 	l2cap_pass_to_tx(chan, control);
6842 
6843 	if (control->final) {
6844 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6845 			l2cap_retransmit_all(chan, control);
6846 	} else {
6847 		l2cap_retransmit_all(chan, control);
6848 		l2cap_ertm_send(chan);
6849 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6850 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6851 	}
6852 }
6853 
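/* Classify a received I-frame's TxSeq against the ERTM receive window:
 * expected, a duplicate, part of SREJ recovery, unexpected (a gap), or
 * invalid.  The caller uses the returned L2CAP_TXSEQ_* value to decide
 * whether to deliver, queue, ignore or disconnect.
 */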
6854 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6855 {
6856 	BT_DBG("chan %p, txseq %d", chan, txseq);
6857 
6858 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6859 	       chan->expected_tx_seq);
6860 
6861 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6862 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6863 		    chan->tx_win) {
6864 			/* See notes below regarding "double poll" and
6865 			 * invalid packets.
6866 			 */
6867 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6868 				BT_DBG("Invalid/Ignore - after SREJ");
6869 				return L2CAP_TXSEQ_INVALID_IGNORE;
6870 			} else {
6871 				BT_DBG("Invalid - in window after SREJ sent");
6872 				return L2CAP_TXSEQ_INVALID;
6873 			}
6874 		}
6875 
6876 		if (chan->srej_list.head == txseq) {
6877 			BT_DBG("Expected SREJ");
6878 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6879 		}
6880 
6881 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6882 			BT_DBG("Duplicate SREJ - txseq already stored");
6883 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6884 		}
6885 
6886 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6887 			BT_DBG("Unexpected SREJ - not requested");
6888 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6889 		}
6890 	}
6891 
6892 	if (chan->expected_tx_seq == txseq) {
6893 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6894 		    chan->tx_win) {
6895 			BT_DBG("Invalid - txseq outside tx window");
6896 			return L2CAP_TXSEQ_INVALID;
6897 		} else {
6898 			BT_DBG("Expected");
6899 			return L2CAP_TXSEQ_EXPECTED;
6900 		}
6901 	}
6902 
6903 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6904 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6905 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6906 		return L2CAP_TXSEQ_DUPLICATE;
6907 	}
6908 
6909 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6910 		/* A source of invalid packets is a "double poll" condition,
6911 		 * where delays cause us to send multiple poll packets.  If
6912 		 * the remote stack receives and processes both polls,
6913 		 * sequence numbers can wrap around in such a way that a
6914 		 * resent frame has a sequence number that looks like new data
6915 		 * with a sequence gap.  This would trigger an erroneous SREJ
6916 		 * request.
6917 		 *
6918 		 * Fortunately, this is impossible with a tx window that's
6919 		 * less than half of the maximum sequence number, which allows
6920 		 * invalid frames to be safely ignored.
6921 		 *
6922 		 * With tx window sizes greater than half of the tx window
6923 		 * maximum, the frame is invalid and cannot be ignored.  This
6924 		 * causes a disconnect.
6925 		 */
6926 
6927 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6928 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6929 			return L2CAP_TXSEQ_INVALID_IGNORE;
6930 		} else {
6931 			BT_DBG("Invalid - txseq outside tx window");
6932 			return L2CAP_TXSEQ_INVALID;
6933 		}
6934 	} else {
6935 		BT_DBG("Unexpected - txseq indicates missing frames");
6936 		return L2CAP_TXSEQ_UNEXPECTED;
6937 	}
6938 }
6939 
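/* ERTM receive handling for the normal RECV state: deliver expected
 * I-frames, start SREJ recovery when a sequence gap is detected, and
 * react to RR/RNR/REJ/SREJ S-frames.  Any skb that was not queued or
 * consumed here is freed before returning.
 */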
6940 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6941 			       struct l2cap_ctrl *control,
6942 			       struct sk_buff *skb, u8 event)
6943 {
6944 	struct l2cap_ctrl local_control;
6945 	int err = 0;
6946 	bool skb_in_use = false;
6947 
6948 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6949 	       event);
6950 
6951 	switch (event) {
6952 	case L2CAP_EV_RECV_IFRAME:
6953 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6954 		case L2CAP_TXSEQ_EXPECTED:
6955 			l2cap_pass_to_tx(chan, control);
6956 
6957 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6958 				BT_DBG("Busy, discarding expected seq %d",
6959 				       control->txseq);
6960 				break;
6961 			}
6962 
6963 			chan->expected_tx_seq = __next_seq(chan,
6964 							   control->txseq);
6965 
6966 			chan->buffer_seq = chan->expected_tx_seq;
6967 			skb_in_use = true;
6968 
6969 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6970 			 * control, so make a copy in advance to use it after
6971 			 * l2cap_reassemble_sdu returns and to avoid the race
6972 			 * condition, for example:
6973 			 *
6974 			 * The current thread calls:
6975 			 *   l2cap_reassemble_sdu
6976 			 *     chan->ops->recv == l2cap_sock_recv_cb
6977 			 *       __sock_queue_rcv_skb
6978 			 * Another thread calls:
6979 			 *   bt_sock_recvmsg
6980 			 *     skb_recv_datagram
6981 			 *     skb_free_datagram
6982 			 * Then the current thread tries to access control, but
6983 			 * it was freed by skb_free_datagram.
6984 			 */
6985 			local_control = *control;
6986 			err = l2cap_reassemble_sdu(chan, skb, control);
6987 			if (err)
6988 				break;
6989 
6990 			if (local_control.final) {
6991 				if (!test_and_clear_bit(CONN_REJ_ACT,
6992 							&chan->conn_state)) {
6993 					local_control.final = 0;
6994 					l2cap_retransmit_all(chan, &local_control);
6995 					l2cap_ertm_send(chan);
6996 				}
6997 			}
6998 
6999 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
7000 				l2cap_send_ack(chan);
7001 			break;
7002 		case L2CAP_TXSEQ_UNEXPECTED:
7003 			l2cap_pass_to_tx(chan, control);
7004 
7005 			/* Can't issue SREJ frames in the local busy state.
7006 			 * Drop this frame, it will be seen as missing
7007 			 * when local busy is exited.
7008 			 */
7009 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7010 				BT_DBG("Busy, discarding unexpected seq %d",
7011 				       control->txseq);
7012 				break;
7013 			}
7014 
7015 			/* There was a gap in the sequence, so an SREJ
7016 			 * must be sent for each missing frame.  The
7017 			 * current frame is stored for later use.
7018 			 */
7019 			skb_queue_tail(&chan->srej_q, skb);
7020 			skb_in_use = true;
7021 			BT_DBG("Queued %p (queue len %d)", skb,
7022 			       skb_queue_len(&chan->srej_q));
7023 
7024 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7025 			l2cap_seq_list_clear(&chan->srej_list);
7026 			l2cap_send_srej(chan, control->txseq);
7027 
7028 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7029 			break;
7030 		case L2CAP_TXSEQ_DUPLICATE:
7031 			l2cap_pass_to_tx(chan, control);
7032 			break;
7033 		case L2CAP_TXSEQ_INVALID_IGNORE:
7034 			break;
7035 		case L2CAP_TXSEQ_INVALID:
7036 		default:
7037 			l2cap_send_disconn_req(chan, ECONNRESET);
7038 			break;
7039 		}
7040 		break;
7041 	case L2CAP_EV_RECV_RR:
7042 		l2cap_pass_to_tx(chan, control);
7043 		if (control->final) {
7044 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7045 
7046 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7047 			    !__chan_is_moving(chan)) {
7048 				control->final = 0;
7049 				l2cap_retransmit_all(chan, control);
7050 			}
7051 
7052 			l2cap_ertm_send(chan);
7053 		} else if (control->poll) {
7054 			l2cap_send_i_or_rr_or_rnr(chan);
7055 		} else {
7056 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7057 					       &chan->conn_state) &&
7058 			    chan->unacked_frames)
7059 				__set_retrans_timer(chan);
7060 
7061 			l2cap_ertm_send(chan);
7062 		}
7063 		break;
7064 	case L2CAP_EV_RECV_RNR:
7065 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7066 		l2cap_pass_to_tx(chan, control);
7067 		if (control && control->poll) {
7068 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7069 			l2cap_send_rr_or_rnr(chan, 0);
7070 		}
7071 		__clear_retrans_timer(chan);
7072 		l2cap_seq_list_clear(&chan->retrans_list);
7073 		break;
7074 	case L2CAP_EV_RECV_REJ:
7075 		l2cap_handle_rej(chan, control);
7076 		break;
7077 	case L2CAP_EV_RECV_SREJ:
7078 		l2cap_handle_srej(chan, control);
7079 		break;
7080 	default:
7081 		break;
7082 	}
7083 
7084 	if (skb && !skb_in_use) {
7085 		BT_DBG("Freeing %p", skb);
7086 		kfree_skb(skb);
7087 	}
7088 
7089 	return err;
7090 }
7091 
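/* ERTM receive handling while SREJ recovery is in progress: incoming
 * I-frames are parked on srej_q until the requested retransmissions
 * arrive, then l2cap_rx_queued_iframes() drains the queue in order.
 */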
7092 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7093 				    struct l2cap_ctrl *control,
7094 				    struct sk_buff *skb, u8 event)
7095 {
7096 	int err = 0;
7097 	u16 txseq = control->txseq;
7098 	bool skb_in_use = false;
7099 
7100 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7101 	       event);
7102 
7103 	switch (event) {
7104 	case L2CAP_EV_RECV_IFRAME:
7105 		switch (l2cap_classify_txseq(chan, txseq)) {
7106 		case L2CAP_TXSEQ_EXPECTED:
7107 			/* Keep frame for reassembly later */
7108 			l2cap_pass_to_tx(chan, control);
7109 			skb_queue_tail(&chan->srej_q, skb);
7110 			skb_in_use = true;
7111 			BT_DBG("Queued %p (queue len %d)", skb,
7112 			       skb_queue_len(&chan->srej_q));
7113 
7114 			chan->expected_tx_seq = __next_seq(chan, txseq);
7115 			break;
7116 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7117 			l2cap_seq_list_pop(&chan->srej_list);
7118 
7119 			l2cap_pass_to_tx(chan, control);
7120 			skb_queue_tail(&chan->srej_q, skb);
7121 			skb_in_use = true;
7122 			BT_DBG("Queued %p (queue len %d)", skb,
7123 			       skb_queue_len(&chan->srej_q));
7124 
7125 			err = l2cap_rx_queued_iframes(chan);
7126 			if (err)
7127 				break;
7128 
7129 			break;
7130 		case L2CAP_TXSEQ_UNEXPECTED:
7131 			/* Got a frame that can't be reassembled yet.
7132 			 * Save it for later, and send SREJs to cover
7133 			 * the missing frames.
7134 			 */
7135 			skb_queue_tail(&chan->srej_q, skb);
7136 			skb_in_use = true;
7137 			BT_DBG("Queued %p (queue len %d)", skb,
7138 			       skb_queue_len(&chan->srej_q));
7139 
7140 			l2cap_pass_to_tx(chan, control);
7141 			l2cap_send_srej(chan, control->txseq);
7142 			break;
7143 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7144 			/* This frame was requested with an SREJ, but
7145 			 * some expected retransmitted frames are
7146 			 * missing.  Request retransmission of missing
7147 			 * SREJ'd frames.
7148 			 */
7149 			skb_queue_tail(&chan->srej_q, skb);
7150 			skb_in_use = true;
7151 			BT_DBG("Queued %p (queue len %d)", skb,
7152 			       skb_queue_len(&chan->srej_q));
7153 
7154 			l2cap_pass_to_tx(chan, control);
7155 			l2cap_send_srej_list(chan, control->txseq);
7156 			break;
7157 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7158 			/* We've already queued this frame.  Drop this copy. */
7159 			l2cap_pass_to_tx(chan, control);
7160 			break;
7161 		case L2CAP_TXSEQ_DUPLICATE:
7162 			/* Expecting a later sequence number, so this frame
7163 			 * was already received.  Ignore it completely.
7164 			 */
7165 			break;
7166 		case L2CAP_TXSEQ_INVALID_IGNORE:
7167 			break;
7168 		case L2CAP_TXSEQ_INVALID:
7169 		default:
7170 			l2cap_send_disconn_req(chan, ECONNRESET);
7171 			break;
7172 		}
7173 		break;
7174 	case L2CAP_EV_RECV_RR:
7175 		l2cap_pass_to_tx(chan, control);
7176 		if (control->final) {
7177 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7178 
7179 			if (!test_and_clear_bit(CONN_REJ_ACT,
7180 						&chan->conn_state)) {
7181 				control->final = 0;
7182 				l2cap_retransmit_all(chan, control);
7183 			}
7184 
7185 			l2cap_ertm_send(chan);
7186 		} else if (control->poll) {
7187 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7188 					       &chan->conn_state) &&
7189 			    chan->unacked_frames) {
7190 				__set_retrans_timer(chan);
7191 			}
7192 
7193 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7194 			l2cap_send_srej_tail(chan);
7195 		} else {
7196 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7197 					       &chan->conn_state) &&
7198 			    chan->unacked_frames)
7199 				__set_retrans_timer(chan);
7200 
7201 			l2cap_send_ack(chan);
7202 		}
7203 		break;
7204 	case L2CAP_EV_RECV_RNR:
7205 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7206 		l2cap_pass_to_tx(chan, control);
7207 		if (control->poll) {
7208 			l2cap_send_srej_tail(chan);
7209 		} else {
7210 			struct l2cap_ctrl rr_control;
7211 			memset(&rr_control, 0, sizeof(rr_control));
7212 			rr_control.sframe = 1;
7213 			rr_control.super = L2CAP_SUPER_RR;
7214 			rr_control.reqseq = chan->buffer_seq;
7215 			l2cap_send_sframe(chan, &rr_control);
7216 		}
7217 
7218 		break;
7219 	case L2CAP_EV_RECV_REJ:
7220 		l2cap_handle_rej(chan, control);
7221 		break;
7222 	case L2CAP_EV_RECV_SREJ:
7223 		l2cap_handle_srej(chan, control);
7224 		break;
7225 	}
7226 
7227 	if (skb && !skb_in_use) {
7228 		BT_DBG("Freeing %p", skb);
7229 		kfree_skb(skb);
7230 	}
7231 
7232 	return err;
7233 }
7234 
7235 static int l2cap_finish_move(struct l2cap_chan *chan)
7236 {
7237 	BT_DBG("chan %p", chan);
7238 
7239 	chan->rx_state = L2CAP_RX_STATE_RECV;
7240 
7241 	if (chan->hs_hcon)
7242 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7243 	else
7244 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7245 
7246 	return l2cap_resegment(chan);
7247 }
7248 
7249 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7250 				 struct l2cap_ctrl *control,
7251 				 struct sk_buff *skb, u8 event)
7252 {
7253 	int err;
7254 
7255 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7256 	       event);
7257 
7258 	if (!control->poll)
7259 		return -EPROTO;
7260 
7261 	l2cap_process_reqseq(chan, control->reqseq);
7262 
7263 	if (!skb_queue_empty(&chan->tx_q))
7264 		chan->tx_send_head = skb_peek(&chan->tx_q);
7265 	else
7266 		chan->tx_send_head = NULL;
7267 
7268 	/* Rewind next_tx_seq to the point expected
7269 	 * by the receiver.
7270 	 */
7271 	chan->next_tx_seq = control->reqseq;
7272 	chan->unacked_frames = 0;
7273 
7274 	err = l2cap_finish_move(chan);
7275 	if (err)
7276 		return err;
7277 
7278 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7279 	l2cap_send_i_or_rr_or_rnr(chan);
7280 
7281 	if (event == L2CAP_EV_RECV_IFRAME)
7282 		return -EPROTO;
7283 
7284 	return l2cap_rx_state_recv(chan, control, NULL, event);
7285 }
7286 
7287 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7288 				 struct l2cap_ctrl *control,
7289 				 struct sk_buff *skb, u8 event)
7290 {
7291 	int err;
7292 
7293 	if (!control->final)
7294 		return -EPROTO;
7295 
7296 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7297 
7298 	chan->rx_state = L2CAP_RX_STATE_RECV;
7299 	l2cap_process_reqseq(chan, control->reqseq);
7300 
7301 	if (!skb_queue_empty(&chan->tx_q))
7302 		chan->tx_send_head = skb_peek(&chan->tx_q);
7303 	else
7304 		chan->tx_send_head = NULL;
7305 
7306 	/* Rewind next_tx_seq to the point expected
7307 	 * by the receiver.
7308 	 */
7309 	chan->next_tx_seq = control->reqseq;
7310 	chan->unacked_frames = 0;
7311 
7312 	if (chan->hs_hcon)
7313 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7314 	else
7315 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7316 
7317 	err = l2cap_resegment(chan);
7318 
7319 	if (!err)
7320 		err = l2cap_rx_state_recv(chan, control, skb, event);
7321 
7322 	return err;
7323 }
7324 
7325 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7326 {
7327 	/* Make sure reqseq is for a packet that has been sent but not acked */
7328 	u16 unacked;
7329 
7330 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7331 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7332 }
7333 
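/* Entry point of the ERTM receive state machine.  After the ReqSeq
 * sanity check the event is routed to the handler for the current
 * rx_state (RECV, SREJ_SENT, WAIT_P or WAIT_F); an invalid ReqSeq
 * tears the channel down with ECONNRESET.
 */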
7334 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7335 		    struct sk_buff *skb, u8 event)
7336 {
7337 	int err = 0;
7338 
7339 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7340 	       control, skb, event, chan->rx_state);
7341 
7342 	if (__valid_reqseq(chan, control->reqseq)) {
7343 		switch (chan->rx_state) {
7344 		case L2CAP_RX_STATE_RECV:
7345 			err = l2cap_rx_state_recv(chan, control, skb, event);
7346 			break;
7347 		case L2CAP_RX_STATE_SREJ_SENT:
7348 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7349 						       event);
7350 			break;
7351 		case L2CAP_RX_STATE_WAIT_P:
7352 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7353 			break;
7354 		case L2CAP_RX_STATE_WAIT_F:
7355 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7356 			break;
7357 		default:
7358 			/* shut it down */
7359 			break;
7360 		}
7361 	} else {
7362 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7363 		       control->reqseq, chan->next_tx_seq,
7364 		       chan->expected_ack_seq);
7365 		l2cap_send_disconn_req(chan, ECONNRESET);
7366 	}
7367 
7368 	return err;
7369 }
7370 
7371 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7372 			   struct sk_buff *skb)
7373 {
7374 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7375 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7376 	 * returns and to avoid the race condition, for example:
7377 	 *
7378 	 * The current thread calls:
7379 	 *   l2cap_reassemble_sdu
7380 	 *     chan->ops->recv == l2cap_sock_recv_cb
7381 	 *       __sock_queue_rcv_skb
7382 	 * Another thread calls:
7383 	 *   bt_sock_recvmsg
7384 	 *     skb_recv_datagram
7385 	 *     skb_free_datagram
7386 	 * Then the current thread tries to access control, but it was freed by
7387 	 * skb_free_datagram.
7388 	 */
7389 	u16 txseq = control->txseq;
7390 
7391 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7392 	       chan->rx_state);
7393 
7394 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7395 		l2cap_pass_to_tx(chan, control);
7396 
7397 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7398 		       __next_seq(chan, chan->buffer_seq));
7399 
7400 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7401 
7402 		l2cap_reassemble_sdu(chan, skb, control);
7403 	} else {
7404 		if (chan->sdu) {
7405 			kfree_skb(chan->sdu);
7406 			chan->sdu = NULL;
7407 		}
7408 		chan->sdu_last_frag = NULL;
7409 		chan->sdu_len = 0;
7410 
7411 		if (skb) {
7412 			BT_DBG("Freeing %p", skb);
7413 			kfree_skb(skb);
7414 		}
7415 	}
7416 
7417 	chan->last_acked_seq = txseq;
7418 	chan->expected_tx_seq = __next_seq(chan, txseq);
7419 
7420 	return 0;
7421 }
7422 
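/* Receive path for a single ERTM/streaming PDU: unpack the control field,
 * verify the FCS and check the payload length against the MPS, then feed
 * I-frames and S-frames as events into the receive state machine.
 * Malformed PDUs are dropped; serious violations also trigger a
 * disconnect request.
 */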
7423 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7424 {
7425 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7426 	u16 len;
7427 	u8 event;
7428 
7429 	__unpack_control(chan, skb);
7430 
7431 	len = skb->len;
7432 
7433 	/*
7434 	 * We can simply drop a corrupted I-frame here.
7435 	 * The receive state machine will notice the missing frame,
7436 	 * start the proper recovery procedure and ask for retransmission.
7437 	 */
7438 	if (l2cap_check_fcs(chan, skb))
7439 		goto drop;
7440 
7441 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7442 		len -= L2CAP_SDULEN_SIZE;
7443 
7444 	if (chan->fcs == L2CAP_FCS_CRC16)
7445 		len -= L2CAP_FCS_SIZE;
7446 
7447 	if (len > chan->mps) {
7448 		l2cap_send_disconn_req(chan, ECONNRESET);
7449 		goto drop;
7450 	}
7451 
7452 	if (chan->ops->filter) {
7453 		if (chan->ops->filter(chan, skb))
7454 			goto drop;
7455 	}
7456 
7457 	if (!control->sframe) {
7458 		int err;
7459 
7460 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7461 		       control->sar, control->reqseq, control->final,
7462 		       control->txseq);
7463 
7464 		/* Validate F-bit - F=0 always valid, F=1 only
7465 		 * valid in TX WAIT_F
7466 		 */
7467 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7468 			goto drop;
7469 
7470 		if (chan->mode != L2CAP_MODE_STREAMING) {
7471 			event = L2CAP_EV_RECV_IFRAME;
7472 			err = l2cap_rx(chan, control, skb, event);
7473 		} else {
7474 			err = l2cap_stream_rx(chan, control, skb);
7475 		}
7476 
7477 		if (err)
7478 			l2cap_send_disconn_req(chan, ECONNRESET);
7479 	} else {
7480 		const u8 rx_func_to_event[4] = {
7481 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7482 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7483 		};
7484 
7485 		/* Only I-frames are expected in streaming mode */
7486 		if (chan->mode == L2CAP_MODE_STREAMING)
7487 			goto drop;
7488 
7489 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7490 		       control->reqseq, control->final, control->poll,
7491 		       control->super);
7492 
7493 		if (len != 0) {
7494 			BT_ERR("Trailing bytes: %d in sframe", len);
7495 			l2cap_send_disconn_req(chan, ECONNRESET);
7496 			goto drop;
7497 		}
7498 
7499 		/* Validate F and P bits */
7500 		if (control->final && (control->poll ||
7501 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7502 			goto drop;
7503 
7504 		event = rx_func_to_event[control->super];
7505 		if (l2cap_rx(chan, control, skb, event))
7506 			l2cap_send_disconn_req(chan, ECONNRESET);
7507 	}
7508 
7509 	return 0;
7510 
7511 drop:
7512 	kfree_skb(skb);
7513 	return 0;
7514 }
7515 
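/* Return credits to the remote device so that it always holds enough
 * credits to send one full SDU, i.e. imtu/mps + 1 PDUs (for example,
 * imtu 512 with mps 230 keeps 3 credits outstanding).  If the current
 * rx_credits already cover that, nothing is sent; otherwise the
 * difference is advertised with an LE Flow Control Credit packet.
 */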
7516 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7517 {
7518 	struct l2cap_conn *conn = chan->conn;
7519 	struct l2cap_le_credits pkt;
7520 	u16 return_credits;
7521 
7522 	return_credits = (chan->imtu / chan->mps) + 1;
7523 
7524 	if (chan->rx_credits >= return_credits)
7525 		return;
7526 
7527 	return_credits -= chan->rx_credits;
7528 
7529 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7530 
7531 	chan->rx_credits += return_credits;
7532 
7533 	pkt.cid     = cpu_to_le16(chan->scid);
7534 	pkt.credits = cpu_to_le16(return_credits);
7535 
7536 	chan->ident = l2cap_get_ident(conn);
7537 
7538 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7539 }
7540 
7541 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7542 {
7543 	int err;
7544 
7545 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7546 
7547 	/* Wait for recv to confirm reception before updating the credits */
7548 	err = chan->ops->recv(chan, skb);
7549 
7550 	/* Update credits whenever an SDU is received */
7551 	l2cap_chan_le_send_credits(chan);
7552 
7553 	return err;
7554 }
7555 
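/* Per-PDU receive path for LE and enhanced credit based flow control:
 * every PDU consumes one receive credit, the first PDU of an SDU carries a
 * 2-byte SDU length field that drives reassembly, and credits are returned
 * to the sender as SDUs complete (or when the sender runs out).
 */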
7556 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7557 {
7558 	int err;
7559 
7560 	if (!chan->rx_credits) {
7561 		BT_ERR("No credits to receive LE L2CAP data");
7562 		l2cap_send_disconn_req(chan, ECONNRESET);
7563 		return -ENOBUFS;
7564 	}
7565 
7566 	if (chan->imtu < skb->len) {
7567 		BT_ERR("Too big LE L2CAP PDU");
7568 		return -ENOBUFS;
7569 	}
7570 
7571 	chan->rx_credits--;
7572 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7573 
7574 	/* Top up the credits if the remote has run out; this should only
7575 	 * happen if the remote is not using the entire MPS.
7576 	 */
7577 	if (!chan->rx_credits)
7578 		l2cap_chan_le_send_credits(chan);
7579 
7580 	err = 0;
7581 
7582 	if (!chan->sdu) {
7583 		u16 sdu_len;
7584 
7585 		sdu_len = get_unaligned_le16(skb->data);
7586 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7587 
7588 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7589 		       sdu_len, skb->len, chan->imtu);
7590 
7591 		if (sdu_len > chan->imtu) {
7592 			BT_ERR("Too big LE L2CAP SDU length received");
7593 			err = -EMSGSIZE;
7594 			goto failed;
7595 		}
7596 
7597 		if (skb->len > sdu_len) {
7598 			BT_ERR("Too much LE L2CAP data received");
7599 			err = -EINVAL;
7600 			goto failed;
7601 		}
7602 
7603 		if (skb->len == sdu_len)
7604 			return l2cap_ecred_recv(chan, skb);
7605 
7606 		chan->sdu = skb;
7607 		chan->sdu_len = sdu_len;
7608 		chan->sdu_last_frag = skb;
7609 
7610 		/* Detect if remote is not able to use the selected MPS */
7611 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7612 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7613 
7614 			/* Adjust the number of credits */
7615 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7616 			chan->mps = mps_len;
7617 			l2cap_chan_le_send_credits(chan);
7618 		}
7619 
7620 		return 0;
7621 	}
7622 
7623 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7624 	       chan->sdu->len, skb->len, chan->sdu_len);
7625 
7626 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7627 		BT_ERR("Too much LE L2CAP data received");
7628 		err = -EINVAL;
7629 		goto failed;
7630 	}
7631 
7632 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7633 	skb = NULL;
7634 
7635 	if (chan->sdu->len == chan->sdu_len) {
7636 		err = l2cap_ecred_recv(chan, chan->sdu);
7637 		if (!err) {
7638 			chan->sdu = NULL;
7639 			chan->sdu_last_frag = NULL;
7640 			chan->sdu_len = 0;
7641 		}
7642 	}
7643 
7644 failed:
7645 	if (err) {
7646 		kfree_skb(skb);
7647 		kfree_skb(chan->sdu);
7648 		chan->sdu = NULL;
7649 		chan->sdu_last_frag = NULL;
7650 		chan->sdu_len = 0;
7651 	}
7652 
7653 	/* We can't return an error here since we took care of the skb
7654 	 * freeing internally. An error return would cause the caller to
7655 	 * do a double-free of the skb.
7656 	 */
7657 	return 0;
7658 }
7659 
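/* Deliver a PDU received on a connection-oriented CID to its channel and
 * dispatch on the channel mode (credit based, basic, ERTM or streaming).
 * Data for an unknown CID is dropped, except on the A2MP CID where a
 * channel may be created on demand.
 */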
7660 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7661 			       struct sk_buff *skb)
7662 {
7663 	struct l2cap_chan *chan;
7664 
7665 	chan = l2cap_get_chan_by_scid(conn, cid);
7666 	if (!chan) {
7667 		if (cid == L2CAP_CID_A2MP) {
7668 			chan = a2mp_channel_create(conn, skb);
7669 			if (!chan) {
7670 				kfree_skb(skb);
7671 				return;
7672 			}
7673 
7674 			l2cap_chan_hold(chan);
7675 			l2cap_chan_lock(chan);
7676 		} else {
7677 			BT_DBG("unknown cid 0x%4.4x", cid);
7678 			/* Drop packet and return */
7679 			kfree_skb(skb);
7680 			return;
7681 		}
7682 	}
7683 
7684 	BT_DBG("chan %p, len %d", chan, skb->len);
7685 
7686 	/* If we receive data on a fixed channel before the info req/rsp
7687 	 * procedure is done, simply assume that the channel is supported
7688 	 * and mark it as ready.
7689 	 */
7690 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7691 		l2cap_chan_ready(chan);
7692 
7693 	if (chan->state != BT_CONNECTED)
7694 		goto drop;
7695 
7696 	switch (chan->mode) {
7697 	case L2CAP_MODE_LE_FLOWCTL:
7698 	case L2CAP_MODE_EXT_FLOWCTL:
7699 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7700 			goto drop;
7701 
7702 		goto done;
7703 
7704 	case L2CAP_MODE_BASIC:
7705 		/* If the socket receive buffer overflows we drop data here,
7706 		 * which is *bad* because L2CAP has to be reliable.
7707 		 * But we don't have any other choice: basic mode L2CAP
7708 		 * doesn't provide a flow control mechanism. */
7709 
7710 		if (chan->imtu < skb->len) {
7711 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7712 			goto drop;
7713 		}
7714 
7715 		if (!chan->ops->recv(chan, skb))
7716 			goto done;
7717 		break;
7718 
7719 	case L2CAP_MODE_ERTM:
7720 	case L2CAP_MODE_STREAMING:
7721 		l2cap_data_rcv(chan, skb);
7722 		goto done;
7723 
7724 	default:
7725 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7726 		break;
7727 	}
7728 
7729 drop:
7730 	kfree_skb(skb);
7731 
7732 done:
7733 	l2cap_chan_unlock(chan);
7734 	l2cap_chan_put(chan);
7735 }
7736 
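/* Connectionless data reception: look up a BR/EDR channel bound to the
 * PSM carried in the G-frame and hand over the payload, recording the
 * remote address and PSM so they can be reported through msg_name.
 */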
7737 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7738 				  struct sk_buff *skb)
7739 {
7740 	struct hci_conn *hcon = conn->hcon;
7741 	struct l2cap_chan *chan;
7742 
7743 	if (hcon->type != ACL_LINK)
7744 		goto free_skb;
7745 
7746 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7747 					ACL_LINK);
7748 	if (!chan)
7749 		goto free_skb;
7750 
7751 	BT_DBG("chan %p, len %d", chan, skb->len);
7752 
7753 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7754 		goto drop;
7755 
7756 	if (chan->imtu < skb->len)
7757 		goto drop;
7758 
7759 	/* Store remote BD_ADDR and PSM for msg_name */
7760 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7761 	bt_cb(skb)->l2cap.psm = psm;
7762 
7763 	if (!chan->ops->recv(chan, skb)) {
7764 		l2cap_chan_put(chan);
7765 		return;
7766 	}
7767 
7768 drop:
7769 	l2cap_chan_put(chan);
7770 free_skb:
7771 	kfree_skb(skb);
7772 }
7773 
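/* Parse the basic L2CAP header of a complete frame and dispatch it by CID:
 * BR/EDR signalling, connectionless data, LE signalling, or a data
 * channel.  Frames that arrive before the HCI link is fully connected are
 * queued on pending_rx and replayed later.
 */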
7774 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7775 {
7776 	struct l2cap_hdr *lh = (void *) skb->data;
7777 	struct hci_conn *hcon = conn->hcon;
7778 	u16 cid, len;
7779 	__le16 psm;
7780 
7781 	if (hcon->state != BT_CONNECTED) {
7782 		BT_DBG("queueing pending rx skb");
7783 		skb_queue_tail(&conn->pending_rx, skb);
7784 		return;
7785 	}
7786 
7787 	skb_pull(skb, L2CAP_HDR_SIZE);
7788 	cid = __le16_to_cpu(lh->cid);
7789 	len = __le16_to_cpu(lh->len);
7790 
7791 	if (len != skb->len) {
7792 		kfree_skb(skb);
7793 		return;
7794 	}
7795 
7796 	/* Since we can't actively block incoming LE connections, we must
7797 	 * at least ensure that we ignore incoming data from them.
7798 	 */
7799 	if (hcon->type == LE_LINK &&
7800 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7801 				   bdaddr_dst_type(hcon))) {
7802 		kfree_skb(skb);
7803 		return;
7804 	}
7805 
7806 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7807 
7808 	switch (cid) {
7809 	case L2CAP_CID_SIGNALING:
7810 		l2cap_sig_channel(conn, skb);
7811 		break;
7812 
7813 	case L2CAP_CID_CONN_LESS:
7814 		psm = get_unaligned((__le16 *) skb->data);
7815 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7816 		l2cap_conless_channel(conn, psm, skb);
7817 		break;
7818 
7819 	case L2CAP_CID_LE_SIGNALING:
7820 		l2cap_le_sig_channel(conn, skb);
7821 		break;
7822 
7823 	default:
7824 		l2cap_data_channel(conn, cid, skb);
7825 		break;
7826 	}
7827 }
7828 
7829 static void process_pending_rx(struct work_struct *work)
7830 {
7831 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7832 					       pending_rx_work);
7833 	struct sk_buff *skb;
7834 
7835 	BT_DBG("");
7836 
7837 	while ((skb = skb_dequeue(&conn->pending_rx)))
7838 		l2cap_recv_frame(conn, skb);
7839 }
7840 
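/* Create (or reuse) the L2CAP connection object attached to an HCI
 * connection: allocate the hci_chan, pick the MTU from the controller's
 * LE or ACL buffer size, advertise the locally supported fixed channels
 * and set up the locks, lists and deferred work used by the connection.
 */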
7841 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7842 {
7843 	struct l2cap_conn *conn = hcon->l2cap_data;
7844 	struct hci_chan *hchan;
7845 
7846 	if (conn)
7847 		return conn;
7848 
7849 	hchan = hci_chan_create(hcon);
7850 	if (!hchan)
7851 		return NULL;
7852 
7853 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7854 	if (!conn) {
7855 		hci_chan_del(hchan);
7856 		return NULL;
7857 	}
7858 
7859 	kref_init(&conn->ref);
7860 	hcon->l2cap_data = conn;
7861 	conn->hcon = hci_conn_get(hcon);
7862 	conn->hchan = hchan;
7863 
7864 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7865 
7866 	switch (hcon->type) {
7867 	case LE_LINK:
7868 		if (hcon->hdev->le_mtu) {
7869 			conn->mtu = hcon->hdev->le_mtu;
7870 			break;
7871 		}
7872 		fallthrough;
7873 	default:
7874 		conn->mtu = hcon->hdev->acl_mtu;
7875 		break;
7876 	}
7877 
7878 	conn->feat_mask = 0;
7879 
7880 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7881 
7882 	if (hcon->type == ACL_LINK &&
7883 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7884 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7885 
7886 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7887 	    (bredr_sc_enabled(hcon->hdev) ||
7888 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7889 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7890 
7891 	mutex_init(&conn->ident_lock);
7892 	mutex_init(&conn->chan_lock);
7893 
7894 	INIT_LIST_HEAD(&conn->chan_l);
7895 	INIT_LIST_HEAD(&conn->users);
7896 
7897 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7898 
7899 	skb_queue_head_init(&conn->pending_rx);
7900 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7901 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7902 
7903 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7904 
7905 	return conn;
7906 }
7907 
7908 static bool is_valid_psm(u16 psm, u8 dst_type) {
7909 	if (!psm)
7910 		return false;
7911 
7912 	if (bdaddr_type_is_le(dst_type))
7913 		return (psm <= 0x00ff);
7914 
7915 	/* PSM must be odd and the LSB of the upper byte must be 0 */
7916 	return ((psm & 0x0101) == 0x0001);
7917 }
7918 
7919 struct l2cap_chan_data {
7920 	struct l2cap_chan *chan;
7921 	struct pid *pid;
7922 	int count;
7923 };
7924 
7925 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7926 {
7927 	struct l2cap_chan_data *d = data;
7928 	struct pid *pid;
7929 
7930 	if (chan == d->chan)
7931 		return;
7932 
7933 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7934 		return;
7935 
7936 	pid = chan->ops->get_peer_pid(chan);
7937 
7938 	/* Only count deferred channels with the same PID/PSM */
7939 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7940 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7941 		return;
7942 
7943 	d->count++;
7944 }
7945 
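/* Establish an outgoing channel: validate the PSM/CID and channel mode,
 * resolve the HCI device that routes to dst, create or reuse the ACL/LE
 * link, attach the channel to the L2CAP connection and, if the link is
 * already up, start the L2CAP-level connection procedure.
 */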
7946 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7947 		       bdaddr_t *dst, u8 dst_type)
7948 {
7949 	struct l2cap_conn *conn;
7950 	struct hci_conn *hcon;
7951 	struct hci_dev *hdev;
7952 	int err;
7953 
7954 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7955 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7956 
7957 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7958 	if (!hdev)
7959 		return -EHOSTUNREACH;
7960 
7961 	hci_dev_lock(hdev);
7962 
7963 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7964 	    chan->chan_type != L2CAP_CHAN_RAW) {
7965 		err = -EINVAL;
7966 		goto done;
7967 	}
7968 
7969 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7970 		err = -EINVAL;
7971 		goto done;
7972 	}
7973 
7974 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7975 		err = -EINVAL;
7976 		goto done;
7977 	}
7978 
7979 	switch (chan->mode) {
7980 	case L2CAP_MODE_BASIC:
7981 		break;
7982 	case L2CAP_MODE_LE_FLOWCTL:
7983 		break;
7984 	case L2CAP_MODE_EXT_FLOWCTL:
7985 		if (!enable_ecred) {
7986 			err = -EOPNOTSUPP;
7987 			goto done;
7988 		}
7989 		break;
7990 	case L2CAP_MODE_ERTM:
7991 	case L2CAP_MODE_STREAMING:
7992 		if (!disable_ertm)
7993 			break;
7994 		fallthrough;
7995 	default:
7996 		err = -EOPNOTSUPP;
7997 		goto done;
7998 	}
7999 
8000 	switch (chan->state) {
8001 	case BT_CONNECT:
8002 	case BT_CONNECT2:
8003 	case BT_CONFIG:
8004 		/* Already connecting */
8005 		err = 0;
8006 		goto done;
8007 
8008 	case BT_CONNECTED:
8009 		/* Already connected */
8010 		err = -EISCONN;
8011 		goto done;
8012 
8013 	case BT_OPEN:
8014 	case BT_BOUND:
8015 		/* Can connect */
8016 		break;
8017 
8018 	default:
8019 		err = -EBADFD;
8020 		goto done;
8021 	}
8022 
8023 	/* Set destination address and psm */
8024 	bacpy(&chan->dst, dst);
8025 	chan->dst_type = dst_type;
8026 
8027 	chan->psm = psm;
8028 	chan->dcid = cid;
8029 
8030 	if (bdaddr_type_is_le(dst_type)) {
8031 		/* Convert from L2CAP channel address type to HCI address type
8032 		 */
8033 		if (dst_type == BDADDR_LE_PUBLIC)
8034 			dst_type = ADDR_LE_DEV_PUBLIC;
8035 		else
8036 			dst_type = ADDR_LE_DEV_RANDOM;
8037 
8038 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8039 			hcon = hci_connect_le(hdev, dst, dst_type,
8040 					      chan->sec_level,
8041 					      HCI_LE_CONN_TIMEOUT,
8042 					      HCI_ROLE_SLAVE, NULL);
8043 		else
8044 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8045 						   chan->sec_level,
8046 						   HCI_LE_CONN_TIMEOUT,
8047 						   CONN_REASON_L2CAP_CHAN);
8048 
8049 	} else {
8050 		u8 auth_type = l2cap_get_auth_type(chan);
8051 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8052 				       CONN_REASON_L2CAP_CHAN);
8053 	}
8054 
8055 	if (IS_ERR(hcon)) {
8056 		err = PTR_ERR(hcon);
8057 		goto done;
8058 	}
8059 
8060 	conn = l2cap_conn_add(hcon);
8061 	if (!conn) {
8062 		hci_conn_drop(hcon);
8063 		err = -ENOMEM;
8064 		goto done;
8065 	}
8066 
8067 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8068 		struct l2cap_chan_data data;
8069 
8070 		data.chan = chan;
8071 		data.pid = chan->ops->get_peer_pid(chan);
8072 		data.count = 1;
8073 
8074 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8075 
8076 		/* Check that there aren't too many channels being connected */
8077 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8078 			hci_conn_drop(hcon);
8079 			err = -EPROTO;
8080 			goto done;
8081 		}
8082 	}
8083 
8084 	mutex_lock(&conn->chan_lock);
8085 	l2cap_chan_lock(chan);
8086 
8087 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8088 		hci_conn_drop(hcon);
8089 		err = -EBUSY;
8090 		goto chan_unlock;
8091 	}
8092 
8093 	/* Update source addr of the socket */
8094 	bacpy(&chan->src, &hcon->src);
8095 	chan->src_type = bdaddr_src_type(hcon);
8096 
8097 	__l2cap_chan_add(conn, chan);
8098 
8099 	/* l2cap_chan_add takes its own ref so we can drop this one */
8100 	hci_conn_drop(hcon);
8101 
8102 	l2cap_state_change(chan, BT_CONNECT);
8103 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8104 
8105 	/* Release chan->sport so that it can be reused by other
8106 	 * sockets (as it's only used for listening sockets).
8107 	 */
8108 	write_lock(&chan_list_lock);
8109 	chan->sport = 0;
8110 	write_unlock(&chan_list_lock);
8111 
8112 	if (hcon->state == BT_CONNECTED) {
8113 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8114 			__clear_chan_timer(chan);
8115 			if (l2cap_chan_check_security(chan, true))
8116 				l2cap_state_change(chan, BT_CONNECTED);
8117 		} else
8118 			l2cap_do_start(chan);
8119 	}
8120 
8121 	err = 0;
8122 
8123 chan_unlock:
8124 	l2cap_chan_unlock(chan);
8125 	mutex_unlock(&conn->chan_lock);
8126 done:
8127 	hci_dev_unlock(hdev);
8128 	hci_dev_put(hdev);
8129 	return err;
8130 }
8131 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8132 
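/* Send an enhanced credit based reconfigure request advertising this
 * channel's updated MTU and MPS for its source CID.
 */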
8133 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8134 {
8135 	struct l2cap_conn *conn = chan->conn;
8136 	struct {
8137 		struct l2cap_ecred_reconf_req req;
8138 		__le16 scid;
8139 	} pdu;
8140 
8141 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8142 	pdu.req.mps = cpu_to_le16(chan->mps);
8143 	pdu.scid    = cpu_to_le16(chan->scid);
8144 
8145 	chan->ident = l2cap_get_ident(conn);
8146 
8147 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8148 		       sizeof(pdu), &pdu);
8149 }
8150 
8151 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8152 {
8153 	if (chan->imtu > mtu)
8154 		return -EINVAL;
8155 
8156 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8157 
8158 	chan->imtu = mtu;
8159 
8160 	l2cap_ecred_reconfigure(chan);
8161 
8162 	return 0;
8163 }
8164 
8165 /* ---- L2CAP interface with lower layer (HCI) ---- */
8166 
8167 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8168 {
8169 	int exact = 0, lm1 = 0, lm2 = 0;
8170 	struct l2cap_chan *c;
8171 
8172 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8173 
8174 	/* Find listening sockets and check their link_mode */
8175 	read_lock(&chan_list_lock);
8176 	list_for_each_entry(c, &chan_list, global_l) {
8177 		if (c->state != BT_LISTEN)
8178 			continue;
8179 
8180 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8181 			lm1 |= HCI_LM_ACCEPT;
8182 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8183 				lm1 |= HCI_LM_MASTER;
8184 			exact++;
8185 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8186 			lm2 |= HCI_LM_ACCEPT;
8187 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8188 				lm2 |= HCI_LM_MASTER;
8189 		}
8190 	}
8191 	read_unlock(&chan_list_lock);
8192 
8193 	return exact ? lm1 : lm2;
8194 }
8195 
8196 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
8197  * from an existing channel in the list or from the beginning of the
8198  * global list (by passing NULL as the first parameter).
8199  */
8200 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8201 						  struct hci_conn *hcon)
8202 {
8203 	u8 src_type = bdaddr_src_type(hcon);
8204 
8205 	read_lock(&chan_list_lock);
8206 
8207 	if (c)
8208 		c = list_next_entry(c, global_l);
8209 	else
8210 		c = list_entry(chan_list.next, typeof(*c), global_l);
8211 
8212 	list_for_each_entry_from(c, &chan_list, global_l) {
8213 		if (c->chan_type != L2CAP_CHAN_FIXED)
8214 			continue;
8215 		if (c->state != BT_LISTEN)
8216 			continue;
8217 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8218 			continue;
8219 		if (src_type != c->src_type)
8220 			continue;
8221 
8222 		c = l2cap_chan_hold_unless_zero(c);
8223 		read_unlock(&chan_list_lock);
8224 		return c;
8225 	}
8226 
8227 	read_unlock(&chan_list_lock);
8228 
8229 	return NULL;
8230 }
8231 
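/* HCI callback invoked when an ACL or LE link completes: on failure the
 * L2CAP connection is torn down; on success it is created, any listening
 * fixed channels are instantiated for the new link (unless the peer is on
 * the reject list) and connection setup is started for pending channels.
 */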
8232 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8233 {
8234 	struct hci_dev *hdev = hcon->hdev;
8235 	struct l2cap_conn *conn;
8236 	struct l2cap_chan *pchan;
8237 	u8 dst_type;
8238 
8239 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8240 		return;
8241 
8242 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8243 
8244 	if (status) {
8245 		l2cap_conn_del(hcon, bt_to_errno(status));
8246 		return;
8247 	}
8248 
8249 	conn = l2cap_conn_add(hcon);
8250 	if (!conn)
8251 		return;
8252 
8253 	dst_type = bdaddr_dst_type(hcon);
8254 
8255 	/* If device is blocked, do not create channels for it */
8256 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8257 		return;
8258 
8259 	/* Find fixed channels and notify them of the new connection. We
8260 	 * use multiple individual lookups, continuing each time where
8261 	 * we left off, because the list lock would prevent calling the
8262 	 * potentially sleeping l2cap_chan_lock() function.
8263 	 */
8264 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8265 	while (pchan) {
8266 		struct l2cap_chan *chan, *next;
8267 
8268 		/* Client fixed channels should override server ones */
8269 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8270 			goto next;
8271 
8272 		l2cap_chan_lock(pchan);
8273 		chan = pchan->ops->new_connection(pchan);
8274 		if (chan) {
8275 			bacpy(&chan->src, &hcon->src);
8276 			bacpy(&chan->dst, &hcon->dst);
8277 			chan->src_type = bdaddr_src_type(hcon);
8278 			chan->dst_type = dst_type;
8279 
8280 			__l2cap_chan_add(conn, chan);
8281 		}
8282 
8283 		l2cap_chan_unlock(pchan);
8284 next:
8285 		next = l2cap_global_fixed_chan(pchan, hcon);
8286 		l2cap_chan_put(pchan);
8287 		pchan = next;
8288 	}
8289 
8290 	l2cap_conn_ready(conn);
8291 }
8292 
8293 int l2cap_disconn_ind(struct hci_conn *hcon)
8294 {
8295 	struct l2cap_conn *conn = hcon->l2cap_data;
8296 
8297 	BT_DBG("hcon %p", hcon);
8298 
8299 	if (!conn)
8300 		return HCI_ERROR_REMOTE_USER_TERM;
8301 	return conn->disc_reason;
8302 }
8303 
8304 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8305 {
8306 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8307 		return;
8308 
8309 	BT_DBG("hcon %p reason %d", hcon, reason);
8310 
8311 	l2cap_conn_del(hcon, bt_to_errno(reason));
8312 }
8313 
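/* React to an encryption change on a connection-oriented channel: if
 * encryption was lost, arm the encryption timer for medium security or
 * close the channel for high/FIPS security; if it came back up, clear
 * the timer again.
 */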
8314 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8315 {
8316 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8317 		return;
8318 
8319 	if (encrypt == 0x00) {
8320 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8321 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8322 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8323 			   chan->sec_level == BT_SECURITY_FIPS)
8324 			l2cap_chan_close(chan, ECONNREFUSED);
8325 	} else {
8326 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8327 			__clear_chan_timer(chan);
8328 	}
8329 }
8330 
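/* HCI security callback: once authentication/encryption completes, resume
 * channels that were waiting for it, continue connection setup for
 * channels in BT_CONNECT/BT_CONNECT2, and on failure (or an inadequate
 * encryption key size) schedule a disconnect instead.
 */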
8331 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8332 {
8333 	struct l2cap_conn *conn = hcon->l2cap_data;
8334 	struct l2cap_chan *chan;
8335 
8336 	if (!conn)
8337 		return;
8338 
8339 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8340 
8341 	mutex_lock(&conn->chan_lock);
8342 
8343 	list_for_each_entry(chan, &conn->chan_l, list) {
8344 		l2cap_chan_lock(chan);
8345 
8346 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8347 		       state_to_string(chan->state));
8348 
8349 		if (chan->scid == L2CAP_CID_A2MP) {
8350 			l2cap_chan_unlock(chan);
8351 			continue;
8352 		}
8353 
8354 		if (!status && encrypt)
8355 			chan->sec_level = hcon->sec_level;
8356 
8357 		if (!__l2cap_no_conn_pending(chan)) {
8358 			l2cap_chan_unlock(chan);
8359 			continue;
8360 		}
8361 
8362 		if (!status && (chan->state == BT_CONNECTED ||
8363 				chan->state == BT_CONFIG)) {
8364 			chan->ops->resume(chan);
8365 			l2cap_check_encryption(chan, encrypt);
8366 			l2cap_chan_unlock(chan);
8367 			continue;
8368 		}
8369 
8370 		if (chan->state == BT_CONNECT) {
8371 			if (!status && l2cap_check_enc_key_size(hcon))
8372 				l2cap_start_connection(chan);
8373 			else
8374 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8375 		} else if (chan->state == BT_CONNECT2 &&
8376 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8377 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8378 			struct l2cap_conn_rsp rsp;
8379 			__u16 res, stat;
8380 
8381 			if (!status && l2cap_check_enc_key_size(hcon)) {
8382 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8383 					res = L2CAP_CR_PEND;
8384 					stat = L2CAP_CS_AUTHOR_PEND;
8385 					chan->ops->defer(chan);
8386 				} else {
8387 					l2cap_state_change(chan, BT_CONFIG);
8388 					res = L2CAP_CR_SUCCESS;
8389 					stat = L2CAP_CS_NO_INFO;
8390 				}
8391 			} else {
8392 				l2cap_state_change(chan, BT_DISCONN);
8393 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8394 				res = L2CAP_CR_SEC_BLOCK;
8395 				stat = L2CAP_CS_NO_INFO;
8396 			}
8397 
8398 			rsp.scid   = cpu_to_le16(chan->dcid);
8399 			rsp.dcid   = cpu_to_le16(chan->scid);
8400 			rsp.result = cpu_to_le16(res);
8401 			rsp.status = cpu_to_le16(stat);
8402 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8403 				       sizeof(rsp), &rsp);
8404 
8405 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8406 			    res == L2CAP_CR_SUCCESS) {
8407 				char buf[128];
8408 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8409 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8410 					       L2CAP_CONF_REQ,
8411 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8412 					       buf);
8413 				chan->num_conf_req++;
8414 			}
8415 		}
8416 
8417 		l2cap_chan_unlock(chan);
8418 	}
8419 
8420 	mutex_unlock(&conn->chan_lock);
8421 }
8422 
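/* Reassemble incoming ACL fragments into complete L2CAP frames.  A start
 * fragment carrying a whole frame is dispatched immediately; otherwise the
 * fragments are accumulated in conn->rx_skb until rx_len reaches zero and
 * the completed frame is handed to l2cap_recv_frame().
 */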
8423 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8424 {
8425 	struct l2cap_conn *conn = hcon->l2cap_data;
8426 	struct l2cap_hdr *hdr;
8427 	int len;
8428 
8429 	/* For an AMP controller do not create an l2cap conn */
8430 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8431 		goto drop;
8432 
8433 	if (!conn)
8434 		conn = l2cap_conn_add(hcon);
8435 
8436 	if (!conn)
8437 		goto drop;
8438 
8439 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8440 
8441 	switch (flags) {
8442 	case ACL_START:
8443 	case ACL_START_NO_FLUSH:
8444 	case ACL_COMPLETE:
8445 		if (conn->rx_len) {
8446 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8447 			kfree_skb(conn->rx_skb);
8448 			conn->rx_skb = NULL;
8449 			conn->rx_len = 0;
8450 			l2cap_conn_unreliable(conn, ECOMM);
8451 		}
8452 
8453 		/* A start fragment always begins with the basic L2CAP header */
8454 		if (skb->len < L2CAP_HDR_SIZE) {
8455 			BT_ERR("Frame is too short (len %d)", skb->len);
8456 			l2cap_conn_unreliable(conn, ECOMM);
8457 			goto drop;
8458 		}
8459 
8460 		hdr = (struct l2cap_hdr *) skb->data;
8461 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8462 
8463 		if (len == skb->len) {
8464 			/* Complete frame received */
8465 			l2cap_recv_frame(conn, skb);
8466 			return;
8467 		}
8468 
8469 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8470 
8471 		if (skb->len > len) {
8472 			BT_ERR("Frame is too long (len %d, expected len %d)",
8473 			       skb->len, len);
8474 			l2cap_conn_unreliable(conn, ECOMM);
8475 			goto drop;
8476 		}
8477 
8478 		/* Allocate skb for the complete frame (with header) */
8479 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8480 		if (!conn->rx_skb)
8481 			goto drop;
8482 
8483 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8484 					  skb->len);
8485 		conn->rx_len = len - skb->len;
8486 		break;
8487 
8488 	case ACL_CONT:
8489 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8490 
8491 		if (!conn->rx_len) {
8492 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8493 			l2cap_conn_unreliable(conn, ECOMM);
8494 			goto drop;
8495 		}
8496 
8497 		if (skb->len > conn->rx_len) {
8498 			BT_ERR("Fragment is too long (len %d, expected %d)",
8499 			       skb->len, conn->rx_len);
8500 			kfree_skb(conn->rx_skb);
8501 			conn->rx_skb = NULL;
8502 			conn->rx_len = 0;
8503 			l2cap_conn_unreliable(conn, ECOMM);
8504 			goto drop;
8505 		}
8506 
8507 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8508 					  skb->len);
8509 		conn->rx_len -= skb->len;
8510 
8511 		if (!conn->rx_len) {
8512 			/* Complete frame received. l2cap_recv_frame
8513 			 * takes ownership of the skb, so clear the
8514 			 * connection's rx_skb pointer first.
8515 			 */
8516 			struct sk_buff *rx_skb = conn->rx_skb;
8517 			conn->rx_skb = NULL;
8518 			l2cap_recv_frame(conn, rx_skb);
8519 		}
8520 		break;
8521 	}
8522 
8523 drop:
8524 	kfree_skb(skb);
8525 }
8526 
8527 static struct hci_cb l2cap_cb = {
8528 	.name		= "L2CAP",
8529 	.connect_cfm	= l2cap_connect_cfm,
8530 	.disconn_cfm	= l2cap_disconn_cfm,
8531 	.security_cfm	= l2cap_security_cfm,
8532 };
8533 
8534 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8535 {
8536 	struct l2cap_chan *c;
8537 
8538 	read_lock(&chan_list_lock);
8539 
8540 	list_for_each_entry(c, &chan_list, global_l) {
8541 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8542 			   &c->src, c->src_type, &c->dst, c->dst_type,
8543 			   c->state, __le16_to_cpu(c->psm),
8544 			   c->scid, c->dcid, c->imtu, c->omtu,
8545 			   c->sec_level, c->mode);
8546 	}
8547 
8548 	read_unlock(&chan_list_lock);
8549 
8550 	return 0;
8551 }
8552 
8553 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8554 
8555 static struct dentry *l2cap_debugfs;
8556 
8557 int __init l2cap_init(void)
8558 {
8559 	int err;
8560 
8561 	err = l2cap_init_sockets();
8562 	if (err < 0)
8563 		return err;
8564 
8565 	hci_register_cb(&l2cap_cb);
8566 
8567 	if (IS_ERR_OR_NULL(bt_debugfs))
8568 		return 0;
8569 
8570 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8571 					    NULL, &l2cap_debugfs_fops);
8572 
8573 	return 0;
8574 }
8575 
8576 void l2cap_exit(void)
8577 {
8578 	debugfs_remove(l2cap_debugfs);
8579 	hci_unregister_cb(&l2cap_cb);
8580 	l2cap_cleanup_sockets();
8581 }
8582 
8583 module_param(disable_ertm, bool, 0644);
8584 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8585 
8586 module_param(enable_ecred, bool, 0644);
8587 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8588