1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
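/* Lookup helpers: the __l2cap_get_chan_by_*() variants below expect the
 * caller to already hold conn->chan_lock, while the l2cap_get_chan_by_*()
 * wrappers take that lock themselves and return the channel locked with a
 * reference held (or NULL if no match was found).
 */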
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns the channel locked and with a reference held.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns the channel locked and with a reference held.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
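/* Worked example of the scheme used below: appending 5 and then 9 to an
 * empty list sets head = 5, tail = 9, list[5 & mask] = 9 and
 * list[9 & mask] = L2CAP_SEQ_LIST_TAIL.  A subsequent pop returns 5,
 * clears list[5 & mask] and advances head to 9, so membership checks,
 * appends and pops all stay constant time.
 */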
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
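/* Work handler for chan_timer: picks ECONNREFUSED or ETIMEDOUT based on the
 * current channel state and closes the channel with that reason.
 */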
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	if (!conn)
439 		return;
440 
441 	mutex_lock(&conn->chan_lock);
442 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
443 	 * this work. No need to call l2cap_chan_hold(chan) here again.
444 	 */
445 	l2cap_chan_lock(chan);
446 
447 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
448 		reason = ECONNREFUSED;
449 	else if (chan->state == BT_CONNECT &&
450 		 chan->sec_level != BT_SECURITY_SDP)
451 		reason = ECONNREFUSED;
452 	else
453 		reason = ETIMEDOUT;
454 
455 	l2cap_chan_close(chan, reason);
456 
457 	chan->ops->close(chan);
458 
459 	l2cap_chan_unlock(chan);
460 	l2cap_chan_put(chan);
461 
462 	mutex_unlock(&conn->chan_lock);
463 }
464 
465 struct l2cap_chan *l2cap_chan_create(void)
466 {
467 	struct l2cap_chan *chan;
468 
469 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 	if (!chan)
471 		return NULL;
472 
473 	skb_queue_head_init(&chan->tx_q);
474 	skb_queue_head_init(&chan->srej_q);
475 	mutex_init(&chan->lock);
476 
477 	/* Set default lock nesting level */
478 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
479 
480 	write_lock(&chan_list_lock);
481 	list_add(&chan->global_l, &chan_list);
482 	write_unlock(&chan_list_lock);
483 
484 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
485 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
486 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
487 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
488 
489 	chan->state = BT_OPEN;
490 
491 	kref_init(&chan->kref);
492 
493 	/* This flag is cleared in l2cap_chan_ready() */
494 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
495 
496 	BT_DBG("chan %p", chan);
497 
498 	return chan;
499 }
500 EXPORT_SYMBOL_GPL(l2cap_chan_create);
501 
502 static void l2cap_chan_destroy(struct kref *kref)
503 {
504 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
505 
506 	BT_DBG("chan %p", chan);
507 
508 	write_lock(&chan_list_lock);
509 	list_del(&chan->global_l);
510 	write_unlock(&chan_list_lock);
511 
512 	kfree(chan);
513 }
514 
515 void l2cap_chan_hold(struct l2cap_chan *c)
516 {
517 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
518 
519 	kref_get(&c->kref);
520 }
521 
522 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
523 {
524 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
525 
526 	if (!kref_get_unless_zero(&c->kref))
527 		return NULL;
528 
529 	return c;
530 }
531 
532 void l2cap_chan_put(struct l2cap_chan *c)
533 {
534 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
535 
536 	kref_put(&c->kref, l2cap_chan_destroy);
537 }
538 EXPORT_SYMBOL_GPL(l2cap_chan_put);
539 
540 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
541 {
542 	chan->fcs  = L2CAP_FCS_CRC16;
543 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
544 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
545 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->remote_max_tx = chan->max_tx;
547 	chan->remote_tx_win = chan->tx_win;
548 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
549 	chan->sec_level = BT_SECURITY_LOW;
550 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
551 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
552 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
553 
554 	chan->conf_state = 0;
555 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
556 
557 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
558 }
559 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
560 
561 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
562 {
563 	chan->sdu = NULL;
564 	chan->sdu_last_frag = NULL;
565 	chan->sdu_len = 0;
566 	chan->tx_credits = tx_credits;
567 	/* Derive MPS from connection MTU to stop HCI fragmentation */
568 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
569 	/* Give enough credits for a full packet */
570 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
571 
572 	skb_queue_head_init(&chan->tx_q);
573 }
574 
575 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
576 {
577 	l2cap_le_flowctl_init(chan, tx_credits);
578 
579 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
580 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
581 		chan->mps = L2CAP_ECRED_MIN_MPS;
582 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
583 	}
584 }
585 
586 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
587 {
588 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
589 	       __le16_to_cpu(chan->psm), chan->dcid);
590 
591 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 
593 	chan->conn = conn;
594 
595 	switch (chan->chan_type) {
596 	case L2CAP_CHAN_CONN_ORIENTED:
597 		/* Alloc CID for connection-oriented socket */
598 		chan->scid = l2cap_alloc_cid(conn);
599 		if (conn->hcon->type == ACL_LINK)
600 			chan->omtu = L2CAP_DEFAULT_MTU;
601 		break;
602 
603 	case L2CAP_CHAN_CONN_LESS:
604 		/* Connectionless socket */
605 		chan->scid = L2CAP_CID_CONN_LESS;
606 		chan->dcid = L2CAP_CID_CONN_LESS;
607 		chan->omtu = L2CAP_DEFAULT_MTU;
608 		break;
609 
610 	case L2CAP_CHAN_FIXED:
611 		/* Caller will set CID and CID specific MTU values */
612 		break;
613 
614 	default:
615 		/* Raw socket can send/recv signalling messages only */
616 		chan->scid = L2CAP_CID_SIGNALING;
617 		chan->dcid = L2CAP_CID_SIGNALING;
618 		chan->omtu = L2CAP_DEFAULT_MTU;
619 	}
620 
621 	chan->local_id		= L2CAP_BESTEFFORT_ID;
622 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
623 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
624 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
625 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
626 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
627 
628 	l2cap_chan_hold(chan);
629 
630 	/* Only keep a reference for fixed channels if they requested it */
631 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
632 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
633 		hci_conn_hold(conn->hcon);
634 
635 	list_add(&chan->list, &conn->chan_l);
636 }
637 
638 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
639 {
640 	mutex_lock(&conn->chan_lock);
641 	__l2cap_chan_add(conn, chan);
642 	mutex_unlock(&conn->chan_lock);
643 }
644 
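/* Detach a channel from its connection: stop the channel timer, tear the
 * channel down and drop the references taken in __l2cap_chan_add().  For
 * ERTM and streaming modes any queued frames and sequence lists are
 * released as well.
 */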
645 void l2cap_chan_del(struct l2cap_chan *chan, int err)
646 {
647 	struct l2cap_conn *conn = chan->conn;
648 
649 	__clear_chan_timer(chan);
650 
651 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
652 	       state_to_string(chan->state));
653 
654 	chan->ops->teardown(chan, err);
655 
656 	if (conn) {
657 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
658 		/* Delete from channel list */
659 		list_del(&chan->list);
660 
661 		l2cap_chan_put(chan);
662 
663 		chan->conn = NULL;
664 
665 		/* Reference was only held for non-fixed channels or
666 		 * fixed channels that explicitly requested it using the
667 		 * FLAG_HOLD_HCI_CONN flag.
668 		 */
669 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
670 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
671 			hci_conn_drop(conn->hcon);
672 
673 		if (mgr && mgr->bredr_chan == chan)
674 			mgr->bredr_chan = NULL;
675 	}
676 
677 	if (chan->hs_hchan) {
678 		struct hci_chan *hs_hchan = chan->hs_hchan;
679 
680 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
681 		amp_disconnect_logical_link(hs_hchan);
682 	}
683 
684 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
685 		return;
686 
687 	switch (chan->mode) {
688 	case L2CAP_MODE_BASIC:
689 		break;
690 
691 	case L2CAP_MODE_LE_FLOWCTL:
692 	case L2CAP_MODE_EXT_FLOWCTL:
693 		skb_queue_purge(&chan->tx_q);
694 		break;
695 
696 	case L2CAP_MODE_ERTM:
697 		__clear_retrans_timer(chan);
698 		__clear_monitor_timer(chan);
699 		__clear_ack_timer(chan);
700 
701 		skb_queue_purge(&chan->srej_q);
702 
703 		l2cap_seq_list_free(&chan->srej_list);
704 		l2cap_seq_list_free(&chan->retrans_list);
705 		fallthrough;
706 
707 	case L2CAP_MODE_STREAMING:
708 		skb_queue_purge(&chan->tx_q);
709 		break;
710 	}
711 
712 	return;
713 }
714 EXPORT_SYMBOL_GPL(l2cap_chan_del);
715 
716 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
717 				 l2cap_chan_func_t func, void *data)
718 {
719 	struct l2cap_chan *chan, *l;
720 
721 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
722 		if (chan->ident == id)
723 			func(chan, data);
724 	}
725 }
726 
727 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
728 			      void *data)
729 {
730 	struct l2cap_chan *chan;
731 
732 	list_for_each_entry(chan, &conn->chan_l, list) {
733 		func(chan, data);
734 	}
735 }
736 
737 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
738 		     void *data)
739 {
740 	if (!conn)
741 		return;
742 
743 	mutex_lock(&conn->chan_lock);
744 	__l2cap_chan_list(conn, func, data);
745 	mutex_unlock(&conn->chan_lock);
746 }
747 
748 EXPORT_SYMBOL_GPL(l2cap_chan_list);
749 
750 static void l2cap_conn_update_id_addr(struct work_struct *work)
751 {
752 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
753 					       id_addr_update_work);
754 	struct hci_conn *hcon = conn->hcon;
755 	struct l2cap_chan *chan;
756 
757 	mutex_lock(&conn->chan_lock);
758 
759 	list_for_each_entry(chan, &conn->chan_l, list) {
760 		l2cap_chan_lock(chan);
761 		bacpy(&chan->dst, &hcon->dst);
762 		chan->dst_type = bdaddr_dst_type(hcon);
763 		l2cap_chan_unlock(chan);
764 	}
765 
766 	mutex_unlock(&conn->chan_lock);
767 }
768 
769 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
770 {
771 	struct l2cap_conn *conn = chan->conn;
772 	struct l2cap_le_conn_rsp rsp;
773 	u16 result;
774 
775 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
776 		result = L2CAP_CR_LE_AUTHORIZATION;
777 	else
778 		result = L2CAP_CR_LE_BAD_PSM;
779 
780 	l2cap_state_change(chan, BT_DISCONN);
781 
782 	rsp.dcid    = cpu_to_le16(chan->scid);
783 	rsp.mtu     = cpu_to_le16(chan->imtu);
784 	rsp.mps     = cpu_to_le16(chan->mps);
785 	rsp.credits = cpu_to_le16(chan->rx_credits);
786 	rsp.result  = cpu_to_le16(result);
787 
788 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
789 		       &rsp);
790 }
791 
792 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
793 {
794 	l2cap_state_change(chan, BT_DISCONN);
795 
796 	__l2cap_ecred_conn_rsp_defer(chan);
797 }
798 
799 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
800 {
801 	struct l2cap_conn *conn = chan->conn;
802 	struct l2cap_conn_rsp rsp;
803 	u16 result;
804 
805 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
806 		result = L2CAP_CR_SEC_BLOCK;
807 	else
808 		result = L2CAP_CR_BAD_PSM;
809 
810 	l2cap_state_change(chan, BT_DISCONN);
811 
812 	rsp.scid   = cpu_to_le16(chan->dcid);
813 	rsp.dcid   = cpu_to_le16(chan->scid);
814 	rsp.result = cpu_to_le16(result);
815 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
816 
817 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
818 }
819 
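/* Close a channel according to its current state: established
 * connection-oriented channels send a disconnect request, deferred incoming
 * connections are rejected, and everything else is simply torn down.
 */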
820 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
821 {
822 	struct l2cap_conn *conn = chan->conn;
823 
824 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
825 
826 	switch (chan->state) {
827 	case BT_LISTEN:
828 		chan->ops->teardown(chan, 0);
829 		break;
830 
831 	case BT_CONNECTED:
832 	case BT_CONFIG:
833 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
835 			l2cap_send_disconn_req(chan, reason);
836 		} else
837 			l2cap_chan_del(chan, reason);
838 		break;
839 
840 	case BT_CONNECT2:
841 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
842 			if (conn->hcon->type == ACL_LINK)
843 				l2cap_chan_connect_reject(chan);
844 			else if (conn->hcon->type == LE_LINK) {
845 				switch (chan->mode) {
846 				case L2CAP_MODE_LE_FLOWCTL:
847 					l2cap_chan_le_connect_reject(chan);
848 					break;
849 				case L2CAP_MODE_EXT_FLOWCTL:
850 					l2cap_chan_ecred_connect_reject(chan);
851 					return;
852 				}
853 			}
854 		}
855 
856 		l2cap_chan_del(chan, reason);
857 		break;
858 
859 	case BT_CONNECT:
860 	case BT_DISCONN:
861 		l2cap_chan_del(chan, reason);
862 		break;
863 
864 	default:
865 		chan->ops->teardown(chan, 0);
866 		break;
867 	}
868 }
869 EXPORT_SYMBOL(l2cap_chan_close);
870 
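/* Map the channel type, PSM and security level onto the HCI authentication
 * requirement used when securing the underlying ACL link.
 */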
871 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
872 {
873 	switch (chan->chan_type) {
874 	case L2CAP_CHAN_RAW:
875 		switch (chan->sec_level) {
876 		case BT_SECURITY_HIGH:
877 		case BT_SECURITY_FIPS:
878 			return HCI_AT_DEDICATED_BONDING_MITM;
879 		case BT_SECURITY_MEDIUM:
880 			return HCI_AT_DEDICATED_BONDING;
881 		default:
882 			return HCI_AT_NO_BONDING;
883 		}
884 		break;
885 	case L2CAP_CHAN_CONN_LESS:
886 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
887 			if (chan->sec_level == BT_SECURITY_LOW)
888 				chan->sec_level = BT_SECURITY_SDP;
889 		}
890 		if (chan->sec_level == BT_SECURITY_HIGH ||
891 		    chan->sec_level == BT_SECURITY_FIPS)
892 			return HCI_AT_NO_BONDING_MITM;
893 		else
894 			return HCI_AT_NO_BONDING;
895 		break;
896 	case L2CAP_CHAN_CONN_ORIENTED:
897 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
898 			if (chan->sec_level == BT_SECURITY_LOW)
899 				chan->sec_level = BT_SECURITY_SDP;
900 
901 			if (chan->sec_level == BT_SECURITY_HIGH ||
902 			    chan->sec_level == BT_SECURITY_FIPS)
903 				return HCI_AT_NO_BONDING_MITM;
904 			else
905 				return HCI_AT_NO_BONDING;
906 		}
907 		fallthrough;
908 
909 	default:
910 		switch (chan->sec_level) {
911 		case BT_SECURITY_HIGH:
912 		case BT_SECURITY_FIPS:
913 			return HCI_AT_GENERAL_BONDING_MITM;
914 		case BT_SECURITY_MEDIUM:
915 			return HCI_AT_GENERAL_BONDING;
916 		default:
917 			return HCI_AT_NO_BONDING;
918 		}
919 		break;
920 	}
921 }
922 
923 /* Service level security */
924 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
925 {
926 	struct l2cap_conn *conn = chan->conn;
927 	__u8 auth_type;
928 
929 	if (conn->hcon->type == LE_LINK)
930 		return smp_conn_security(conn->hcon, chan->sec_level);
931 
932 	auth_type = l2cap_get_auth_type(chan);
933 
934 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
935 				 initiator);
936 }
937 
938 static u8 l2cap_get_ident(struct l2cap_conn *conn)
939 {
940 	u8 id;
941 
942 	/* Get next available identifier.
943 	 *    1 - 128 are used by kernel.
944 	 *  129 - 199 are reserved.
945 	 *  200 - 254 are used by utilities like l2ping, etc.
946 	 */
947 
948 	mutex_lock(&conn->ident_lock);
949 
950 	if (++conn->tx_ident > 128)
951 		conn->tx_ident = 1;
952 
953 	id = conn->tx_ident;
954 
955 	mutex_unlock(&conn->ident_lock);
956 
957 	return id;
958 }
959 
960 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
961 			   void *data)
962 {
963 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
964 	u8 flags;
965 
966 	BT_DBG("code 0x%2.2x", code);
967 
968 	if (!skb)
969 		return;
970 
971 	/* Use NO_FLUSH if supported or we have an LE link (which does
972 	 * not support auto-flushing packets) */
973 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
974 	    conn->hcon->type == LE_LINK)
975 		flags = ACL_START_NO_FLUSH;
976 	else
977 		flags = ACL_START;
978 
979 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
980 	skb->priority = HCI_PRIO_MAX;
981 
982 	hci_send_acl(conn->hchan, skb, flags);
983 }
984 
985 static bool __chan_is_moving(struct l2cap_chan *chan)
986 {
987 	return chan->move_state != L2CAP_MOVE_STABLE &&
988 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
989 }
990 
991 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
992 {
993 	struct hci_conn *hcon = chan->conn->hcon;
994 	u16 flags;
995 
996 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
997 	       skb->priority);
998 
999 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
1000 		if (chan->hs_hchan)
1001 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1002 		else
1003 			kfree_skb(skb);
1004 
1005 		return;
1006 	}
1007 
1008 	/* Use NO_FLUSH for LE links (where this is the only option) or
1009 	 * if the BR/EDR link supports it and flushing has not been
1010 	 * explicitly requested (through FLAG_FLUSHABLE).
1011 	 */
1012 	if (hcon->type == LE_LINK ||
1013 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1014 	     lmp_no_flush_capable(hcon->hdev)))
1015 		flags = ACL_START_NO_FLUSH;
1016 	else
1017 		flags = ACL_START;
1018 
1019 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1020 	hci_send_acl(chan->conn->hchan, skb, flags);
1021 }
1022 
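/* The pack/unpack helpers below convert between the on-air ERTM control
 * field (16-bit enhanced or 32-bit extended, selected by FLAG_EXT_CTRL) and
 * the struct l2cap_ctrl representation used by the TX/RX state machines.
 */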
1023 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1024 {
1025 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1026 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1027 
1028 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1029 		/* S-Frame */
1030 		control->sframe = 1;
1031 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1032 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1033 
1034 		control->sar = 0;
1035 		control->txseq = 0;
1036 	} else {
1037 		/* I-Frame */
1038 		control->sframe = 0;
1039 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1040 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1041 
1042 		control->poll = 0;
1043 		control->super = 0;
1044 	}
1045 }
1046 
1047 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1048 {
1049 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1050 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1051 
1052 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1053 		/* S-Frame */
1054 		control->sframe = 1;
1055 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1056 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1057 
1058 		control->sar = 0;
1059 		control->txseq = 0;
1060 	} else {
1061 		/* I-Frame */
1062 		control->sframe = 0;
1063 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1064 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1065 
1066 		control->poll = 0;
1067 		control->super = 0;
1068 	}
1069 }
1070 
1071 static inline void __unpack_control(struct l2cap_chan *chan,
1072 				    struct sk_buff *skb)
1073 {
1074 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1075 		__unpack_extended_control(get_unaligned_le32(skb->data),
1076 					  &bt_cb(skb)->l2cap);
1077 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1078 	} else {
1079 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1080 					  &bt_cb(skb)->l2cap);
1081 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1082 	}
1083 }
1084 
1085 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1086 {
1087 	u32 packed;
1088 
1089 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1090 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1091 
1092 	if (control->sframe) {
1093 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1094 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1095 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1096 	} else {
1097 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1098 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1099 	}
1100 
1101 	return packed;
1102 }
1103 
1104 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1105 {
1106 	u16 packed;
1107 
1108 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1109 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1110 
1111 	if (control->sframe) {
1112 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1113 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1114 		packed |= L2CAP_CTRL_FRAME_TYPE;
1115 	} else {
1116 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1117 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1118 	}
1119 
1120 	return packed;
1121 }
1122 
1123 static inline void __pack_control(struct l2cap_chan *chan,
1124 				  struct l2cap_ctrl *control,
1125 				  struct sk_buff *skb)
1126 {
1127 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1128 		put_unaligned_le32(__pack_extended_control(control),
1129 				   skb->data + L2CAP_HDR_SIZE);
1130 	} else {
1131 		put_unaligned_le16(__pack_enhanced_control(control),
1132 				   skb->data + L2CAP_HDR_SIZE);
1133 	}
1134 }
1135 
1136 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1137 {
1138 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1139 		return L2CAP_EXT_HDR_SIZE;
1140 	else
1141 		return L2CAP_ENH_HDR_SIZE;
1142 }
1143 
1144 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1145 					       u32 control)
1146 {
1147 	struct sk_buff *skb;
1148 	struct l2cap_hdr *lh;
1149 	int hlen = __ertm_hdr_size(chan);
1150 
1151 	if (chan->fcs == L2CAP_FCS_CRC16)
1152 		hlen += L2CAP_FCS_SIZE;
1153 
1154 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1155 
1156 	if (!skb)
1157 		return ERR_PTR(-ENOMEM);
1158 
1159 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1160 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1161 	lh->cid = cpu_to_le16(chan->dcid);
1162 
1163 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1164 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1165 	else
1166 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1167 
1168 	if (chan->fcs == L2CAP_FCS_CRC16) {
1169 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1170 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1171 	}
1172 
1173 	skb->priority = HCI_PRIO_MAX;
1174 	return skb;
1175 }
1176 
1177 static void l2cap_send_sframe(struct l2cap_chan *chan,
1178 			      struct l2cap_ctrl *control)
1179 {
1180 	struct sk_buff *skb;
1181 	u32 control_field;
1182 
1183 	BT_DBG("chan %p, control %p", chan, control);
1184 
1185 	if (!control->sframe)
1186 		return;
1187 
1188 	if (__chan_is_moving(chan))
1189 		return;
1190 
1191 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1192 	    !control->poll)
1193 		control->final = 1;
1194 
1195 	if (control->super == L2CAP_SUPER_RR)
1196 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1197 	else if (control->super == L2CAP_SUPER_RNR)
1198 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1199 
1200 	if (control->super != L2CAP_SUPER_SREJ) {
1201 		chan->last_acked_seq = control->reqseq;
1202 		__clear_ack_timer(chan);
1203 	}
1204 
1205 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1206 	       control->final, control->poll, control->super);
1207 
1208 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1209 		control_field = __pack_extended_control(control);
1210 	else
1211 		control_field = __pack_enhanced_control(control);
1212 
1213 	skb = l2cap_create_sframe_pdu(chan, control_field);
1214 	if (!IS_ERR(skb))
1215 		l2cap_do_send(chan, skb);
1216 }
1217 
1218 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1219 {
1220 	struct l2cap_ctrl control;
1221 
1222 	BT_DBG("chan %p, poll %d", chan, poll);
1223 
1224 	memset(&control, 0, sizeof(control));
1225 	control.sframe = 1;
1226 	control.poll = poll;
1227 
1228 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1229 		control.super = L2CAP_SUPER_RNR;
1230 	else
1231 		control.super = L2CAP_SUPER_RR;
1232 
1233 	control.reqseq = chan->buffer_seq;
1234 	l2cap_send_sframe(chan, &control);
1235 }
1236 
1237 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1238 {
1239 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1240 		return true;
1241 
1242 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1243 }
1244 
1245 static bool __amp_capable(struct l2cap_chan *chan)
1246 {
1247 	struct l2cap_conn *conn = chan->conn;
1248 	struct hci_dev *hdev;
1249 	bool amp_available = false;
1250 
1251 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1252 		return false;
1253 
1254 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1255 		return false;
1256 
1257 	read_lock(&hci_dev_list_lock);
1258 	list_for_each_entry(hdev, &hci_dev_list, list) {
1259 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1260 		    test_bit(HCI_UP, &hdev->flags)) {
1261 			amp_available = true;
1262 			break;
1263 		}
1264 	}
1265 	read_unlock(&hci_dev_list_lock);
1266 
1267 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1268 		return amp_available;
1269 
1270 	return false;
1271 }
1272 
1273 static bool l2cap_check_efs(struct l2cap_chan *chan)
1274 {
1275 	/* Check EFS parameters */
1276 	return true;
1277 }
1278 
1279 void l2cap_send_conn_req(struct l2cap_chan *chan)
1280 {
1281 	struct l2cap_conn *conn = chan->conn;
1282 	struct l2cap_conn_req req;
1283 
1284 	req.scid = cpu_to_le16(chan->scid);
1285 	req.psm  = chan->psm;
1286 
1287 	chan->ident = l2cap_get_ident(conn);
1288 
1289 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1290 
1291 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1292 }
1293 
1294 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1295 {
1296 	struct l2cap_create_chan_req req;
1297 	req.scid = cpu_to_le16(chan->scid);
1298 	req.psm  = chan->psm;
1299 	req.amp_id = amp_id;
1300 
1301 	chan->ident = l2cap_get_ident(chan->conn);
1302 
1303 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1304 		       sizeof(req), &req);
1305 }
1306 
1307 static void l2cap_move_setup(struct l2cap_chan *chan)
1308 {
1309 	struct sk_buff *skb;
1310 
1311 	BT_DBG("chan %p", chan);
1312 
1313 	if (chan->mode != L2CAP_MODE_ERTM)
1314 		return;
1315 
1316 	__clear_retrans_timer(chan);
1317 	__clear_monitor_timer(chan);
1318 	__clear_ack_timer(chan);
1319 
1320 	chan->retry_count = 0;
1321 	skb_queue_walk(&chan->tx_q, skb) {
1322 		if (bt_cb(skb)->l2cap.retries)
1323 			bt_cb(skb)->l2cap.retries = 1;
1324 		else
1325 			break;
1326 	}
1327 
1328 	chan->expected_tx_seq = chan->buffer_seq;
1329 
1330 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1331 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1332 	l2cap_seq_list_clear(&chan->retrans_list);
1333 	l2cap_seq_list_clear(&chan->srej_list);
1334 	skb_queue_purge(&chan->srej_q);
1335 
1336 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1337 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1338 
1339 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1340 }
1341 
1342 static void l2cap_move_done(struct l2cap_chan *chan)
1343 {
1344 	u8 move_role = chan->move_role;
1345 	BT_DBG("chan %p", chan);
1346 
1347 	chan->move_state = L2CAP_MOVE_STABLE;
1348 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1349 
1350 	if (chan->mode != L2CAP_MODE_ERTM)
1351 		return;
1352 
1353 	switch (move_role) {
1354 	case L2CAP_MOVE_ROLE_INITIATOR:
1355 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1356 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1357 		break;
1358 	case L2CAP_MOVE_ROLE_RESPONDER:
1359 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1360 		break;
1361 	}
1362 }
1363 
1364 static void l2cap_chan_ready(struct l2cap_chan *chan)
1365 {
1366 	/* The channel may have already been flagged as connected in
1367 	 * case of receiving data before the L2CAP info req/rsp
1368 	 * procedure is complete.
1369 	 */
1370 	if (chan->state == BT_CONNECTED)
1371 		return;
1372 
1373 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1374 	chan->conf_state = 0;
1375 	__clear_chan_timer(chan);
1376 
1377 	switch (chan->mode) {
1378 	case L2CAP_MODE_LE_FLOWCTL:
1379 	case L2CAP_MODE_EXT_FLOWCTL:
1380 		if (!chan->tx_credits)
1381 			chan->ops->suspend(chan);
1382 		break;
1383 	}
1384 
1385 	chan->state = BT_CONNECTED;
1386 
1387 	chan->ops->ready(chan);
1388 }
1389 
1390 static void l2cap_le_connect(struct l2cap_chan *chan)
1391 {
1392 	struct l2cap_conn *conn = chan->conn;
1393 	struct l2cap_le_conn_req req;
1394 
1395 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1396 		return;
1397 
1398 	if (!chan->imtu)
1399 		chan->imtu = chan->conn->mtu;
1400 
1401 	l2cap_le_flowctl_init(chan, 0);
1402 
1403 	req.psm     = chan->psm;
1404 	req.scid    = cpu_to_le16(chan->scid);
1405 	req.mtu     = cpu_to_le16(chan->imtu);
1406 	req.mps     = cpu_to_le16(chan->mps);
1407 	req.credits = cpu_to_le16(chan->rx_credits);
1408 
1409 	chan->ident = l2cap_get_ident(conn);
1410 
1411 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1412 		       sizeof(req), &req);
1413 }
1414 
1415 struct l2cap_ecred_conn_data {
1416 	struct {
1417 		struct l2cap_ecred_conn_req req;
1418 		__le16 scid[5];
1419 	} __packed pdu;
1420 	struct l2cap_chan *chan;
1421 	struct pid *pid;
1422 	int count;
1423 };
1424 
1425 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1426 {
1427 	struct l2cap_ecred_conn_data *conn = data;
1428 	struct pid *pid;
1429 
1430 	if (chan == conn->chan)
1431 		return;
1432 
1433 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1434 		return;
1435 
1436 	pid = chan->ops->get_peer_pid(chan);
1437 
1438 	/* Only add deferred channels with the same PID/PSM */
1439 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1440 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1441 		return;
1442 
1443 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1444 		return;
1445 
1446 	l2cap_ecred_init(chan, 0);
1447 
1448 	/* Set the same ident so we can match on the rsp */
1449 	chan->ident = conn->chan->ident;
1450 
1451 	/* Include all channels deferred */
1452 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1453 
1454 	conn->count++;
1455 }
1456 
1457 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1458 {
1459 	struct l2cap_conn *conn = chan->conn;
1460 	struct l2cap_ecred_conn_data data;
1461 
1462 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1463 		return;
1464 
1465 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1466 		return;
1467 
1468 	l2cap_ecred_init(chan, 0);
1469 
1470 	memset(&data, 0, sizeof(data));
1471 	data.pdu.req.psm     = chan->psm;
1472 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1473 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1474 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1475 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1476 
1477 	chan->ident = l2cap_get_ident(conn);
1478 	data.pid = chan->ops->get_peer_pid(chan);
1479 
1480 	data.count = 1;
1481 	data.chan = chan;
1482 	data.pid = chan->ops->get_peer_pid(chan);
1483 
1484 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1485 
1486 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1487 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1488 		       &data.pdu);
1489 }
1490 
1491 static void l2cap_le_start(struct l2cap_chan *chan)
1492 {
1493 	struct l2cap_conn *conn = chan->conn;
1494 
1495 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1496 		return;
1497 
1498 	if (!chan->psm) {
1499 		l2cap_chan_ready(chan);
1500 		return;
1501 	}
1502 
1503 	if (chan->state == BT_CONNECT) {
1504 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1505 			l2cap_ecred_connect(chan);
1506 		else
1507 			l2cap_le_connect(chan);
1508 	}
1509 }
1510 
1511 static void l2cap_start_connection(struct l2cap_chan *chan)
1512 {
1513 	if (__amp_capable(chan)) {
1514 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1515 		a2mp_discover_amp(chan);
1516 	} else if (chan->conn->hcon->type == LE_LINK) {
1517 		l2cap_le_start(chan);
1518 	} else {
1519 		l2cap_send_conn_req(chan);
1520 	}
1521 }
1522 
1523 static void l2cap_request_info(struct l2cap_conn *conn)
1524 {
1525 	struct l2cap_info_req req;
1526 
1527 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1528 		return;
1529 
1530 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1531 
1532 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1533 	conn->info_ident = l2cap_get_ident(conn);
1534 
1535 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1536 
1537 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1538 		       sizeof(req), &req);
1539 }
1540 
1541 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1542 {
1543 	/* The minimum encryption key size needs to be enforced by the
1544 	 * host stack before establishing any L2CAP connections. The
1545 	 * specification in theory allows a minimum of 1, but to align
1546 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1547 	 *
1548 	 * This check might also be called for unencrypted connections
1549 	 * that have no key size requirements. Ensure that the link is
1550 	 * actually encrypted before enforcing a key size.
1551 	 */
1552 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1553 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1554 }
1555 
1556 static void l2cap_do_start(struct l2cap_chan *chan)
1557 {
1558 	struct l2cap_conn *conn = chan->conn;
1559 
1560 	if (conn->hcon->type == LE_LINK) {
1561 		l2cap_le_start(chan);
1562 		return;
1563 	}
1564 
1565 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1566 		l2cap_request_info(conn);
1567 		return;
1568 	}
1569 
1570 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1571 		return;
1572 
1573 	if (!l2cap_chan_check_security(chan, true) ||
1574 	    !__l2cap_no_conn_pending(chan))
1575 		return;
1576 
1577 	if (l2cap_check_enc_key_size(conn->hcon))
1578 		l2cap_start_connection(chan);
1579 	else
1580 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1581 }
1582 
1583 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1584 {
1585 	u32 local_feat_mask = l2cap_feat_mask;
1586 	if (!disable_ertm)
1587 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1588 
1589 	switch (mode) {
1590 	case L2CAP_MODE_ERTM:
1591 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1592 	case L2CAP_MODE_STREAMING:
1593 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1594 	default:
1595 		return 0x00;
1596 	}
1597 }
1598 
1599 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1600 {
1601 	struct l2cap_conn *conn = chan->conn;
1602 	struct l2cap_disconn_req req;
1603 
1604 	if (!conn)
1605 		return;
1606 
1607 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1608 		__clear_retrans_timer(chan);
1609 		__clear_monitor_timer(chan);
1610 		__clear_ack_timer(chan);
1611 	}
1612 
1613 	if (chan->scid == L2CAP_CID_A2MP) {
1614 		l2cap_state_change(chan, BT_DISCONN);
1615 		return;
1616 	}
1617 
1618 	req.dcid = cpu_to_le16(chan->dcid);
1619 	req.scid = cpu_to_le16(chan->scid);
1620 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1621 		       sizeof(req), &req);
1622 
1623 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1624 }
1625 
1626 /* ---- L2CAP connections ---- */
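/* Walk every channel on the connection once it is ready (typically after
 * the information request/response exchange has completed): outgoing
 * channels are started if the security and encryption key size checks
 * pass, and deferred incoming connection requests are answered.
 */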
1627 static void l2cap_conn_start(struct l2cap_conn *conn)
1628 {
1629 	struct l2cap_chan *chan, *tmp;
1630 
1631 	BT_DBG("conn %p", conn);
1632 
1633 	mutex_lock(&conn->chan_lock);
1634 
1635 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1636 		l2cap_chan_lock(chan);
1637 
1638 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1639 			l2cap_chan_ready(chan);
1640 			l2cap_chan_unlock(chan);
1641 			continue;
1642 		}
1643 
1644 		if (chan->state == BT_CONNECT) {
1645 			if (!l2cap_chan_check_security(chan, true) ||
1646 			    !__l2cap_no_conn_pending(chan)) {
1647 				l2cap_chan_unlock(chan);
1648 				continue;
1649 			}
1650 
1651 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1652 			    && test_bit(CONF_STATE2_DEVICE,
1653 					&chan->conf_state)) {
1654 				l2cap_chan_close(chan, ECONNRESET);
1655 				l2cap_chan_unlock(chan);
1656 				continue;
1657 			}
1658 
1659 			if (l2cap_check_enc_key_size(conn->hcon))
1660 				l2cap_start_connection(chan);
1661 			else
1662 				l2cap_chan_close(chan, ECONNREFUSED);
1663 
1664 		} else if (chan->state == BT_CONNECT2) {
1665 			struct l2cap_conn_rsp rsp;
1666 			char buf[128];
1667 			rsp.scid = cpu_to_le16(chan->dcid);
1668 			rsp.dcid = cpu_to_le16(chan->scid);
1669 
1670 			if (l2cap_chan_check_security(chan, false)) {
1671 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1672 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1673 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1674 					chan->ops->defer(chan);
1675 
1676 				} else {
1677 					l2cap_state_change(chan, BT_CONFIG);
1678 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1679 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1680 				}
1681 			} else {
1682 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1683 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1684 			}
1685 
1686 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1687 				       sizeof(rsp), &rsp);
1688 
1689 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1690 			    rsp.result != L2CAP_CR_SUCCESS) {
1691 				l2cap_chan_unlock(chan);
1692 				continue;
1693 			}
1694 
1695 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1696 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1697 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1698 			chan->num_conf_req++;
1699 		}
1700 
1701 		l2cap_chan_unlock(chan);
1702 	}
1703 
1704 	mutex_unlock(&conn->chan_lock);
1705 }
1706 
1707 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1708 {
1709 	struct hci_conn *hcon = conn->hcon;
1710 	struct hci_dev *hdev = hcon->hdev;
1711 
1712 	BT_DBG("%s conn %p", hdev->name, conn);
1713 
1714 	/* For outgoing pairing which doesn't necessarily have an
1715 	 * associated socket (e.g. mgmt_pair_device).
1716 	 */
1717 	if (hcon->out)
1718 		smp_conn_security(hcon, hcon->pending_sec_level);
1719 
1720 	/* For LE peripheral connections, make sure the connection interval
1721 	 * is in the range of the minimum and maximum interval that has
1722 	 * been configured for this connection. If not, then trigger
1723 	 * the connection update procedure.
1724 	 */
1725 	if (hcon->role == HCI_ROLE_SLAVE &&
1726 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1727 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1728 		struct l2cap_conn_param_update_req req;
1729 
1730 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1731 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1732 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1733 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1734 
1735 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1736 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1737 	}
1738 }
1739 
1740 static void l2cap_conn_ready(struct l2cap_conn *conn)
1741 {
1742 	struct l2cap_chan *chan;
1743 	struct hci_conn *hcon = conn->hcon;
1744 
1745 	BT_DBG("conn %p", conn);
1746 
1747 	if (hcon->type == ACL_LINK)
1748 		l2cap_request_info(conn);
1749 
1750 	mutex_lock(&conn->chan_lock);
1751 
1752 	list_for_each_entry(chan, &conn->chan_l, list) {
1753 
1754 		l2cap_chan_lock(chan);
1755 
1756 		if (chan->scid == L2CAP_CID_A2MP) {
1757 			l2cap_chan_unlock(chan);
1758 			continue;
1759 		}
1760 
1761 		if (hcon->type == LE_LINK) {
1762 			l2cap_le_start(chan);
1763 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1764 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1765 				l2cap_chan_ready(chan);
1766 		} else if (chan->state == BT_CONNECT) {
1767 			l2cap_do_start(chan);
1768 		}
1769 
1770 		l2cap_chan_unlock(chan);
1771 	}
1772 
1773 	mutex_unlock(&conn->chan_lock);
1774 
1775 	if (hcon->type == LE_LINK)
1776 		l2cap_le_conn_ready(conn);
1777 
1778 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1779 }
1780 
1781 /* Notify sockets that we cannot guarantee reliability anymore */
1782 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1783 {
1784 	struct l2cap_chan *chan;
1785 
1786 	BT_DBG("conn %p", conn);
1787 
1788 	mutex_lock(&conn->chan_lock);
1789 
1790 	list_for_each_entry(chan, &conn->chan_l, list) {
1791 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1792 			l2cap_chan_set_err(chan, err);
1793 	}
1794 
1795 	mutex_unlock(&conn->chan_lock);
1796 }
1797 
1798 static void l2cap_info_timeout(struct work_struct *work)
1799 {
1800 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1801 					       info_timer.work);
1802 
1803 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1804 	conn->info_ident = 0;
1805 
1806 	l2cap_conn_start(conn);
1807 }
1808 
1809 /*
1810  * l2cap_user
1811  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1812  * callback is called during registration. The ->remove callback is called
1813  * during unregistration.
1814  * An l2cap_user object is unregistered either explicitly or implicitly when the
1815  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1816  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1817  * External modules must own a reference to the l2cap_conn object if they intend
1818  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1819  * any time if they don't.
1820  */
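/*
 * Minimal usage sketch (hypothetical names; only the l2cap_* calls below are
 * real). user->list is assumed to be initialized, e.g. with INIT_LIST_HEAD(),
 * before registering:
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,	// called during l2cap_register_user()
 *		.remove = my_remove,	// called on unregister or conn teardown
 *	};
 *
 *	l2cap_conn_get(conn);	// keep conn alive until we unregister
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */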
1821 
1822 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1823 {
1824 	struct hci_dev *hdev = conn->hcon->hdev;
1825 	int ret;
1826 
1827 	/* We need to check whether l2cap_conn is registered. If it is not, we
1828 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1829 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1830 	 * relies on the parent hci_conn object to be locked. This itself relies
1831 	 * on the hci_dev object to be locked. So we must lock the hci device
1832 	 * here, too. */
1833 
1834 	hci_dev_lock(hdev);
1835 
1836 	if (!list_empty(&user->list)) {
1837 		ret = -EINVAL;
1838 		goto out_unlock;
1839 	}
1840 
1841 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1842 	if (!conn->hchan) {
1843 		ret = -ENODEV;
1844 		goto out_unlock;
1845 	}
1846 
1847 	ret = user->probe(conn, user);
1848 	if (ret)
1849 		goto out_unlock;
1850 
1851 	list_add(&user->list, &conn->users);
1852 	ret = 0;
1853 
1854 out_unlock:
1855 	hci_dev_unlock(hdev);
1856 	return ret;
1857 }
1858 EXPORT_SYMBOL(l2cap_register_user);
1859 
1860 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1861 {
1862 	struct hci_dev *hdev = conn->hcon->hdev;
1863 
1864 	hci_dev_lock(hdev);
1865 
1866 	if (list_empty(&user->list))
1867 		goto out_unlock;
1868 
1869 	list_del_init(&user->list);
1870 	user->remove(conn, user);
1871 
1872 out_unlock:
1873 	hci_dev_unlock(hdev);
1874 }
1875 EXPORT_SYMBOL(l2cap_unregister_user);
1876 
1877 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1878 {
1879 	struct l2cap_user *user;
1880 
1881 	while (!list_empty(&conn->users)) {
1882 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1883 		list_del_init(&user->list);
1884 		user->remove(conn, user);
1885 	}
1886 }
1887 
1888 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1889 {
1890 	struct l2cap_conn *conn = hcon->l2cap_data;
1891 	struct l2cap_chan *chan, *l;
1892 
1893 	if (!conn)
1894 		return;
1895 
1896 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1897 
1898 	kfree_skb(conn->rx_skb);
1899 
1900 	skb_queue_purge(&conn->pending_rx);
1901 
1902 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1903 	 * might block if we are running on a worker from the same workqueue
1904 	 * pending_rx_work is waiting on.
1905 	 */
1906 	if (work_pending(&conn->pending_rx_work))
1907 		cancel_work_sync(&conn->pending_rx_work);
1908 
1909 	if (work_pending(&conn->id_addr_update_work))
1910 		cancel_work_sync(&conn->id_addr_update_work);
1911 
1912 	l2cap_unregister_all_users(conn);
1913 
1914 	/* Force the connection to be immediately dropped */
1915 	hcon->disc_timeout = 0;
1916 
1917 	mutex_lock(&conn->chan_lock);
1918 
1919 	/* Kill channels */
1920 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1921 		l2cap_chan_hold(chan);
1922 		l2cap_chan_lock(chan);
1923 
1924 		l2cap_chan_del(chan, err);
1925 
1926 		chan->ops->close(chan);
1927 
1928 		l2cap_chan_unlock(chan);
1929 		l2cap_chan_put(chan);
1930 	}
1931 
1932 	mutex_unlock(&conn->chan_lock);
1933 
1934 	hci_chan_del(conn->hchan);
1935 
1936 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1937 		cancel_delayed_work_sync(&conn->info_timer);
1938 
1939 	hcon->l2cap_data = NULL;
1940 	conn->hchan = NULL;
1941 	l2cap_conn_put(conn);
1942 }
1943 
1944 static void l2cap_conn_free(struct kref *ref)
1945 {
1946 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1947 
1948 	hci_conn_put(conn->hcon);
1949 	kfree(conn);
1950 }
1951 
1952 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1953 {
1954 	kref_get(&conn->ref);
1955 	return conn;
1956 }
1957 EXPORT_SYMBOL(l2cap_conn_get);
1958 
1959 void l2cap_conn_put(struct l2cap_conn *conn)
1960 {
1961 	kref_put(&conn->ref, l2cap_conn_free);
1962 }
1963 EXPORT_SYMBOL(l2cap_conn_put);
1964 
1965 /* ---- Socket interface ---- */
1966 
1967 /* Find socket with psm and source / destination bdaddr.
1968  * Returns closest match.
1969  */
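/* E.g. a channel bound to BDADDR_ANY on the requested PSM is returned as the
 * closest match when no channel matches both src and dst exactly.
 */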
1970 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1971 						   bdaddr_t *src,
1972 						   bdaddr_t *dst,
1973 						   u8 link_type)
1974 {
1975 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1976 
1977 	read_lock(&chan_list_lock);
1978 
1979 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1980 		if (state && c->state != state)
1981 			continue;
1982 
1983 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1987 			continue;
1988 
1989 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1990 			int src_match, dst_match;
1991 			int src_any, dst_any;
1992 
1993 			/* Exact match. */
1994 			src_match = !bacmp(&c->src, src);
1995 			dst_match = !bacmp(&c->dst, dst);
1996 			if (src_match && dst_match) {
1997 				if (!l2cap_chan_hold_unless_zero(c))
1998 					continue;
1999 
2000 				read_unlock(&chan_list_lock);
2001 				return c;
2002 			}
2003 
2004 			/* Closest match */
2005 			src_any = !bacmp(&c->src, BDADDR_ANY);
2006 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2007 			if ((src_match && dst_any) || (src_any && dst_match) ||
2008 			    (src_any && dst_any))
2009 				c1 = c;
2010 		}
2011 	}
2012 
2013 	if (c1)
2014 		c1 = l2cap_chan_hold_unless_zero(c1);
2015 
2016 	read_unlock(&chan_list_lock);
2017 
2018 	return c1;
2019 }
2020 
2021 static void l2cap_monitor_timeout(struct work_struct *work)
2022 {
2023 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2024 					       monitor_timer.work);
2025 
2026 	BT_DBG("chan %p", chan);
2027 
2028 	l2cap_chan_lock(chan);
2029 
2030 	if (!chan->conn) {
2031 		l2cap_chan_unlock(chan);
2032 		l2cap_chan_put(chan);
2033 		return;
2034 	}
2035 
2036 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2037 
2038 	l2cap_chan_unlock(chan);
2039 	l2cap_chan_put(chan);
2040 }
2041 
2042 static void l2cap_retrans_timeout(struct work_struct *work)
2043 {
2044 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2045 					       retrans_timer.work);
2046 
2047 	BT_DBG("chan %p", chan);
2048 
2049 	l2cap_chan_lock(chan);
2050 
2051 	if (!chan->conn) {
2052 		l2cap_chan_unlock(chan);
2053 		l2cap_chan_put(chan);
2054 		return;
2055 	}
2056 
2057 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2058 	l2cap_chan_unlock(chan);
2059 	l2cap_chan_put(chan);
2060 }
2061 
2062 static void l2cap_streaming_send(struct l2cap_chan *chan,
2063 				 struct sk_buff_head *skbs)
2064 {
2065 	struct sk_buff *skb;
2066 	struct l2cap_ctrl *control;
2067 
2068 	BT_DBG("chan %p, skbs %p", chan, skbs);
2069 
2070 	if (__chan_is_moving(chan))
2071 		return;
2072 
2073 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2074 
2075 	while (!skb_queue_empty(&chan->tx_q)) {
2076 
2077 		skb = skb_dequeue(&chan->tx_q);
2078 
2079 		bt_cb(skb)->l2cap.retries = 1;
2080 		control = &bt_cb(skb)->l2cap;
2081 
2082 		control->reqseq = 0;
2083 		control->txseq = chan->next_tx_seq;
2084 
2085 		__pack_control(chan, control, skb);
2086 
2087 		if (chan->fcs == L2CAP_FCS_CRC16) {
2088 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2089 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2090 		}
2091 
2092 		l2cap_do_send(chan, skb);
2093 
2094 		BT_DBG("Sent txseq %u", control->txseq);
2095 
2096 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2097 		chan->frames_sent++;
2098 	}
2099 }
2100 
2101 static int l2cap_ertm_send(struct l2cap_chan *chan)
2102 {
2103 	struct sk_buff *skb, *tx_skb;
2104 	struct l2cap_ctrl *control;
2105 	int sent = 0;
2106 
2107 	BT_DBG("chan %p", chan);
2108 
2109 	if (chan->state != BT_CONNECTED)
2110 		return -ENOTCONN;
2111 
2112 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2113 		return 0;
2114 
2115 	if (__chan_is_moving(chan))
2116 		return 0;
2117 
2118 	while (chan->tx_send_head &&
2119 	       chan->unacked_frames < chan->remote_tx_win &&
2120 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2121 
2122 		skb = chan->tx_send_head;
2123 
2124 		bt_cb(skb)->l2cap.retries = 1;
2125 		control = &bt_cb(skb)->l2cap;
2126 
2127 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2128 			control->final = 1;
2129 
2130 		control->reqseq = chan->buffer_seq;
2131 		chan->last_acked_seq = chan->buffer_seq;
2132 		control->txseq = chan->next_tx_seq;
2133 
2134 		__pack_control(chan, control, skb);
2135 
2136 		if (chan->fcs == L2CAP_FCS_CRC16) {
2137 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2138 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2139 		}
2140 
2141 		/* Clone after data has been modified. Data is assumed to be
2142 		 * read-only (for locking purposes) on cloned sk_buffs.
2143 		 */
2144 		tx_skb = skb_clone(skb, GFP_KERNEL);
2145 
2146 		if (!tx_skb)
2147 			break;
2148 
2149 		__set_retrans_timer(chan);
2150 
2151 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2152 		chan->unacked_frames++;
2153 		chan->frames_sent++;
2154 		sent++;
2155 
2156 		if (skb_queue_is_last(&chan->tx_q, skb))
2157 			chan->tx_send_head = NULL;
2158 		else
2159 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2160 
2161 		l2cap_do_send(chan, tx_skb);
2162 		BT_DBG("Sent txseq %u", control->txseq);
2163 	}
2164 
2165 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2166 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2167 
2168 	return sent;
2169 }
2170 
2171 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2172 {
2173 	struct l2cap_ctrl control;
2174 	struct sk_buff *skb;
2175 	struct sk_buff *tx_skb;
2176 	u16 seq;
2177 
2178 	BT_DBG("chan %p", chan);
2179 
2180 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2181 		return;
2182 
2183 	if (__chan_is_moving(chan))
2184 		return;
2185 
2186 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2187 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2188 
2189 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2190 		if (!skb) {
2191 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2192 			       seq);
2193 			continue;
2194 		}
2195 
2196 		bt_cb(skb)->l2cap.retries++;
2197 		control = bt_cb(skb)->l2cap;
2198 
2199 		if (chan->max_tx != 0 &&
2200 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2201 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2202 			l2cap_send_disconn_req(chan, ECONNRESET);
2203 			l2cap_seq_list_clear(&chan->retrans_list);
2204 			break;
2205 		}
2206 
2207 		control.reqseq = chan->buffer_seq;
2208 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2209 			control.final = 1;
2210 		else
2211 			control.final = 0;
2212 
2213 		if (skb_cloned(skb)) {
2214 			/* Cloned sk_buffs are read-only, so we need a
2215 			 * writeable copy
2216 			 */
2217 			tx_skb = skb_copy(skb, GFP_KERNEL);
2218 		} else {
2219 			tx_skb = skb_clone(skb, GFP_KERNEL);
2220 		}
2221 
2222 		if (!tx_skb) {
2223 			l2cap_seq_list_clear(&chan->retrans_list);
2224 			break;
2225 		}
2226 
2227 		/* Update skb contents */
2228 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2229 			put_unaligned_le32(__pack_extended_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		} else {
2232 			put_unaligned_le16(__pack_enhanced_control(&control),
2233 					   tx_skb->data + L2CAP_HDR_SIZE);
2234 		}
2235 
2236 		/* Update FCS */
2237 		if (chan->fcs == L2CAP_FCS_CRC16) {
2238 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2239 					tx_skb->len - L2CAP_FCS_SIZE);
2240 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2241 						L2CAP_FCS_SIZE);
2242 		}
2243 
2244 		l2cap_do_send(chan, tx_skb);
2245 
2246 		BT_DBG("Resent txseq %d", control.txseq);
2247 
2248 		chan->last_acked_seq = chan->buffer_seq;
2249 	}
2250 }
2251 
2252 static void l2cap_retransmit(struct l2cap_chan *chan,
2253 			     struct l2cap_ctrl *control)
2254 {
2255 	BT_DBG("chan %p, control %p", chan, control);
2256 
2257 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2258 	l2cap_ertm_resend(chan);
2259 }
2260 
2261 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2262 				 struct l2cap_ctrl *control)
2263 {
2264 	struct sk_buff *skb;
2265 
2266 	BT_DBG("chan %p, control %p", chan, control);
2267 
2268 	if (control->poll)
2269 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2270 
2271 	l2cap_seq_list_clear(&chan->retrans_list);
2272 
2273 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2274 		return;
2275 
2276 	if (chan->unacked_frames) {
2277 		skb_queue_walk(&chan->tx_q, skb) {
2278 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2279 			    skb == chan->tx_send_head)
2280 				break;
2281 		}
2282 
2283 		skb_queue_walk_from(&chan->tx_q, skb) {
2284 			if (skb == chan->tx_send_head)
2285 				break;
2286 
2287 			l2cap_seq_list_append(&chan->retrans_list,
2288 					      bt_cb(skb)->l2cap.txseq);
2289 		}
2290 
2291 		l2cap_ertm_resend(chan);
2292 	}
2293 }
2294 
2295 static void l2cap_send_ack(struct l2cap_chan *chan)
2296 {
2297 	struct l2cap_ctrl control;
2298 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2299 					 chan->last_acked_seq);
2300 	int threshold;
2301 
2302 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2303 	       chan, chan->last_acked_seq, chan->buffer_seq);
2304 
2305 	memset(&control, 0, sizeof(control));
2306 	control.sframe = 1;
2307 
2308 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2309 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2310 		__clear_ack_timer(chan);
2311 		control.super = L2CAP_SUPER_RNR;
2312 		control.reqseq = chan->buffer_seq;
2313 		l2cap_send_sframe(chan, &control);
2314 	} else {
2315 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2316 			l2cap_ertm_send(chan);
2317 			/* If any i-frames were sent, they included an ack */
2318 			if (chan->buffer_seq == chan->last_acked_seq)
2319 				frames_to_ack = 0;
2320 		}
2321 
2322 		/* Ack now if the window is 3/4ths full.
2323 		 * Calculate without mul or div
2324 		 */
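		/* E.g. ack_win = 63: 63 + (63 << 1) = 189, 189 >> 2 = 47,
		 * i.e. roughly 3/4 of the window (illustrative value).
		 */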
2325 		threshold = chan->ack_win;
2326 		threshold += threshold << 1;
2327 		threshold >>= 2;
2328 
2329 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2330 		       threshold);
2331 
2332 		if (frames_to_ack >= threshold) {
2333 			__clear_ack_timer(chan);
2334 			control.super = L2CAP_SUPER_RR;
2335 			control.reqseq = chan->buffer_seq;
2336 			l2cap_send_sframe(chan, &control);
2337 			frames_to_ack = 0;
2338 		}
2339 
2340 		if (frames_to_ack)
2341 			__set_ack_timer(chan);
2342 	}
2343 }
2344 
2345 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2346 					 struct msghdr *msg, int len,
2347 					 int count, struct sk_buff *skb)
2348 {
2349 	struct l2cap_conn *conn = chan->conn;
2350 	struct sk_buff **frag;
2351 	int sent = 0;
2352 
2353 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2354 		return -EFAULT;
2355 
2356 	sent += count;
2357 	len  -= count;
2358 
2359 	/* Continuation fragments (no L2CAP header) */
2360 	frag = &skb_shinfo(skb)->frag_list;
2361 	while (len) {
2362 		struct sk_buff *tmp;
2363 
2364 		count = min_t(unsigned int, conn->mtu, len);
2365 
2366 		tmp = chan->ops->alloc_skb(chan, 0, count,
2367 					   msg->msg_flags & MSG_DONTWAIT);
2368 		if (IS_ERR(tmp))
2369 			return PTR_ERR(tmp);
2370 
2371 		*frag = tmp;
2372 
2373 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2374 				   &msg->msg_iter))
2375 			return -EFAULT;
2376 
2377 		sent += count;
2378 		len  -= count;
2379 
2380 		skb->len += (*frag)->len;
2381 		skb->data_len += (*frag)->len;
2382 
2383 		frag = &(*frag)->next;
2384 	}
2385 
2386 	return sent;
2387 }
2388 
2389 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2390 						 struct msghdr *msg, size_t len)
2391 {
2392 	struct l2cap_conn *conn = chan->conn;
2393 	struct sk_buff *skb;
2394 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2395 	struct l2cap_hdr *lh;
2396 
2397 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2398 	       __le16_to_cpu(chan->psm), len);
2399 
2400 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2401 
2402 	skb = chan->ops->alloc_skb(chan, hlen, count,
2403 				   msg->msg_flags & MSG_DONTWAIT);
2404 	if (IS_ERR(skb))
2405 		return skb;
2406 
2407 	/* Create L2CAP header */
2408 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 	lh->cid = cpu_to_le16(chan->dcid);
2410 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2411 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2422 					      struct msghdr *msg, size_t len)
2423 {
2424 	struct l2cap_conn *conn = chan->conn;
2425 	struct sk_buff *skb;
2426 	int err, count;
2427 	struct l2cap_hdr *lh;
2428 
2429 	BT_DBG("chan %p len %zu", chan, len);
2430 
2431 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2432 
2433 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2434 				   msg->msg_flags & MSG_DONTWAIT);
2435 	if (IS_ERR(skb))
2436 		return skb;
2437 
2438 	/* Create L2CAP header */
2439 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2440 	lh->cid = cpu_to_le16(chan->dcid);
2441 	lh->len = cpu_to_le16(len);
2442 
2443 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 	if (unlikely(err < 0)) {
2445 		kfree_skb(skb);
2446 		return ERR_PTR(err);
2447 	}
2448 	return skb;
2449 }
2450 
2451 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2452 					       struct msghdr *msg, size_t len,
2453 					       u16 sdulen)
2454 {
2455 	struct l2cap_conn *conn = chan->conn;
2456 	struct sk_buff *skb;
2457 	int err, count, hlen;
2458 	struct l2cap_hdr *lh;
2459 
2460 	BT_DBG("chan %p len %zu", chan, len);
2461 
2462 	if (!conn)
2463 		return ERR_PTR(-ENOTCONN);
2464 
2465 	hlen = __ertm_hdr_size(chan);
2466 
2467 	if (sdulen)
2468 		hlen += L2CAP_SDULEN_SIZE;
2469 
2470 	if (chan->fcs == L2CAP_FCS_CRC16)
2471 		hlen += L2CAP_FCS_SIZE;
2472 
2473 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2474 
2475 	skb = chan->ops->alloc_skb(chan, hlen, count,
2476 				   msg->msg_flags & MSG_DONTWAIT);
2477 	if (IS_ERR(skb))
2478 		return skb;
2479 
2480 	/* Create L2CAP header */
2481 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2482 	lh->cid = cpu_to_le16(chan->dcid);
2483 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2484 
2485 	/* Control header is populated later */
2486 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2487 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2488 	else
2489 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2490 
2491 	if (sdulen)
2492 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2493 
2494 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2495 	if (unlikely(err < 0)) {
2496 		kfree_skb(skb);
2497 		return ERR_PTR(err);
2498 	}
2499 
2500 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2501 	bt_cb(skb)->l2cap.retries = 0;
2502 	return skb;
2503 }
2504 
2505 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2506 			     struct sk_buff_head *seg_queue,
2507 			     struct msghdr *msg, size_t len)
2508 {
2509 	struct sk_buff *skb;
2510 	u16 sdu_len;
2511 	size_t pdu_len;
2512 	u8 sar;
2513 
2514 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2515 
2516 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2517 	 * so fragmented skbs are not used.  The HCI layer's handling
2518 	 * of fragmented skbs is not compatible with ERTM's queueing.
2519 	 */
2520 
2521 	/* PDU size is derived from the HCI MTU */
2522 	pdu_len = chan->conn->mtu;
2523 
2524 	/* Constrain PDU size for BR/EDR connections */
2525 	if (!chan->hs_hcon)
2526 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2527 
2528 	/* Adjust for largest possible L2CAP overhead. */
2529 	if (chan->fcs)
2530 		pdu_len -= L2CAP_FCS_SIZE;
2531 
2532 	pdu_len -= __ertm_hdr_size(chan);
2533 
2534 	/* Remote device may have requested smaller PDUs */
2535 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2536 
2537 	if (len <= pdu_len) {
2538 		sar = L2CAP_SAR_UNSEGMENTED;
2539 		sdu_len = 0;
2540 		pdu_len = len;
2541 	} else {
2542 		sar = L2CAP_SAR_START;
2543 		sdu_len = len;
2544 	}
2545 
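	/* Illustrative (hypothetical sizes): a 2000 byte SDU with a pdu_len of
	 * 1011 is emitted as a 1011 byte SAR_START I-frame carrying the 2 byte
	 * SDU length, followed by a single 989 byte SAR_END I-frame.
	 */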
2546 	while (len > 0) {
2547 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2548 
2549 		if (IS_ERR(skb)) {
2550 			__skb_queue_purge(seg_queue);
2551 			return PTR_ERR(skb);
2552 		}
2553 
2554 		bt_cb(skb)->l2cap.sar = sar;
2555 		__skb_queue_tail(seg_queue, skb);
2556 
2557 		len -= pdu_len;
2558 		if (sdu_len)
2559 			sdu_len = 0;
2560 
2561 		if (len <= pdu_len) {
2562 			sar = L2CAP_SAR_END;
2563 			pdu_len = len;
2564 		} else {
2565 			sar = L2CAP_SAR_CONTINUE;
2566 		}
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2573 						   struct msghdr *msg,
2574 						   size_t len, u16 sdulen)
2575 {
2576 	struct l2cap_conn *conn = chan->conn;
2577 	struct sk_buff *skb;
2578 	int err, count, hlen;
2579 	struct l2cap_hdr *lh;
2580 
2581 	BT_DBG("chan %p len %zu", chan, len);
2582 
2583 	if (!conn)
2584 		return ERR_PTR(-ENOTCONN);
2585 
2586 	hlen = L2CAP_HDR_SIZE;
2587 
2588 	if (sdulen)
2589 		hlen += L2CAP_SDULEN_SIZE;
2590 
2591 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2592 
2593 	skb = chan->ops->alloc_skb(chan, hlen, count,
2594 				   msg->msg_flags & MSG_DONTWAIT);
2595 	if (IS_ERR(skb))
2596 		return skb;
2597 
2598 	/* Create L2CAP header */
2599 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2600 	lh->cid = cpu_to_le16(chan->dcid);
2601 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2602 
2603 	if (sdulen)
2604 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2605 
2606 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2607 	if (unlikely(err < 0)) {
2608 		kfree_skb(skb);
2609 		return ERR_PTR(err);
2610 	}
2611 
2612 	return skb;
2613 }
2614 
2615 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2616 				struct sk_buff_head *seg_queue,
2617 				struct msghdr *msg, size_t len)
2618 {
2619 	struct sk_buff *skb;
2620 	size_t pdu_len;
2621 	u16 sdu_len;
2622 
2623 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2624 
2625 	sdu_len = len;
2626 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2627 
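	/* Illustrative (hypothetical sizes): with remote_mps = 247, a 600 byte
	 * SDU is sent as 245 + 247 + 108 byte payloads; only the first PDU
	 * carries the 2 byte SDU length field.
	 */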
2628 	while (len > 0) {
2629 		if (len <= pdu_len)
2630 			pdu_len = len;
2631 
2632 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2633 		if (IS_ERR(skb)) {
2634 			__skb_queue_purge(seg_queue);
2635 			return PTR_ERR(skb);
2636 		}
2637 
2638 		__skb_queue_tail(seg_queue, skb);
2639 
2640 		len -= pdu_len;
2641 
2642 		if (sdu_len) {
2643 			sdu_len = 0;
2644 			pdu_len += L2CAP_SDULEN_SIZE;
2645 		}
2646 	}
2647 
2648 	return 0;
2649 }
2650 
2651 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2652 {
2653 	int sent = 0;
2654 
2655 	BT_DBG("chan %p", chan);
2656 
2657 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2658 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2659 		chan->tx_credits--;
2660 		sent++;
2661 	}
2662 
2663 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2664 	       skb_queue_len(&chan->tx_q));
2665 }
2666 
2667 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2668 {
2669 	struct sk_buff *skb;
2670 	int err;
2671 	struct sk_buff_head seg_queue;
2672 
2673 	if (!chan->conn)
2674 		return -ENOTCONN;
2675 
2676 	/* Connectionless channel */
2677 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2678 		skb = l2cap_create_connless_pdu(chan, msg, len);
2679 		if (IS_ERR(skb))
2680 			return PTR_ERR(skb);
2681 
2682 		l2cap_do_send(chan, skb);
2683 		return len;
2684 	}
2685 
2686 	switch (chan->mode) {
2687 	case L2CAP_MODE_LE_FLOWCTL:
2688 	case L2CAP_MODE_EXT_FLOWCTL:
2689 		/* Check outgoing MTU */
2690 		if (len > chan->omtu)
2691 			return -EMSGSIZE;
2692 
2693 		__skb_queue_head_init(&seg_queue);
2694 
2695 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2696 
2697 		if (chan->state != BT_CONNECTED) {
2698 			__skb_queue_purge(&seg_queue);
2699 			err = -ENOTCONN;
2700 		}
2701 
2702 		if (err)
2703 			return err;
2704 
2705 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2706 
2707 		l2cap_le_flowctl_send(chan);
2708 
2709 		if (!chan->tx_credits)
2710 			chan->ops->suspend(chan);
2711 
2712 		err = len;
2713 
2714 		break;
2715 
2716 	case L2CAP_MODE_BASIC:
2717 		/* Check outgoing MTU */
2718 		if (len > chan->omtu)
2719 			return -EMSGSIZE;
2720 
2721 		/* Create a basic PDU */
2722 		skb = l2cap_create_basic_pdu(chan, msg, len);
2723 		if (IS_ERR(skb))
2724 			return PTR_ERR(skb);
2725 
2726 		l2cap_do_send(chan, skb);
2727 		err = len;
2728 		break;
2729 
2730 	case L2CAP_MODE_ERTM:
2731 	case L2CAP_MODE_STREAMING:
2732 		/* Check outgoing MTU */
2733 		if (len > chan->omtu) {
2734 			err = -EMSGSIZE;
2735 			break;
2736 		}
2737 
2738 		__skb_queue_head_init(&seg_queue);
2739 
2740 		/* Do segmentation before calling in to the state machine,
2741 		 * since it's possible to block while waiting for memory
2742 		 * allocation.
2743 		 */
2744 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2745 
2746 		if (err)
2747 			break;
2748 
2749 		if (chan->mode == L2CAP_MODE_ERTM)
2750 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2751 		else
2752 			l2cap_streaming_send(chan, &seg_queue);
2753 
2754 		err = len;
2755 
2756 		/* If the skbs were not queued for sending, they'll still be in
2757 		 * seg_queue and need to be purged.
2758 		 */
2759 		__skb_queue_purge(&seg_queue);
2760 		break;
2761 
2762 	default:
2763 		BT_DBG("bad mode %1.1x", chan->mode);
2764 		err = -EBADFD;
2765 	}
2766 
2767 	return err;
2768 }
2769 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2770 
2771 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2772 {
2773 	struct l2cap_ctrl control;
2774 	u16 seq;
2775 
2776 	BT_DBG("chan %p, txseq %u", chan, txseq);
2777 
2778 	memset(&control, 0, sizeof(control));
2779 	control.sframe = 1;
2780 	control.super = L2CAP_SUPER_SREJ;
2781 
2782 	for (seq = chan->expected_tx_seq; seq != txseq;
2783 	     seq = __next_seq(chan, seq)) {
2784 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2785 			control.reqseq = seq;
2786 			l2cap_send_sframe(chan, &control);
2787 			l2cap_seq_list_append(&chan->srej_list, seq);
2788 		}
2789 	}
2790 
2791 	chan->expected_tx_seq = __next_seq(chan, txseq);
2792 }
2793 
2794 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2795 {
2796 	struct l2cap_ctrl control;
2797 
2798 	BT_DBG("chan %p", chan);
2799 
2800 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2801 		return;
2802 
2803 	memset(&control, 0, sizeof(control));
2804 	control.sframe = 1;
2805 	control.super = L2CAP_SUPER_SREJ;
2806 	control.reqseq = chan->srej_list.tail;
2807 	l2cap_send_sframe(chan, &control);
2808 }
2809 
2810 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2811 {
2812 	struct l2cap_ctrl control;
2813 	u16 initial_head;
2814 	u16 seq;
2815 
2816 	BT_DBG("chan %p, txseq %u", chan, txseq);
2817 
2818 	memset(&control, 0, sizeof(control));
2819 	control.sframe = 1;
2820 	control.super = L2CAP_SUPER_SREJ;
2821 
2822 	/* Capture initial list head to allow only one pass through the list. */
2823 	initial_head = chan->srej_list.head;
2824 
2825 	do {
2826 		seq = l2cap_seq_list_pop(&chan->srej_list);
2827 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2828 			break;
2829 
2830 		control.reqseq = seq;
2831 		l2cap_send_sframe(chan, &control);
2832 		l2cap_seq_list_append(&chan->srej_list, seq);
2833 	} while (chan->srej_list.head != initial_head);
2834 }
2835 
2836 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2837 {
2838 	struct sk_buff *acked_skb;
2839 	u16 ackseq;
2840 
2841 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2842 
2843 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2844 		return;
2845 
2846 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2847 	       chan->expected_ack_seq, chan->unacked_frames);
2848 
2849 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2850 	     ackseq = __next_seq(chan, ackseq)) {
2851 
2852 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2853 		if (acked_skb) {
2854 			skb_unlink(acked_skb, &chan->tx_q);
2855 			kfree_skb(acked_skb);
2856 			chan->unacked_frames--;
2857 		}
2858 	}
2859 
2860 	chan->expected_ack_seq = reqseq;
2861 
2862 	if (chan->unacked_frames == 0)
2863 		__clear_retrans_timer(chan);
2864 
2865 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2866 }
2867 
2868 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2869 {
2870 	BT_DBG("chan %p", chan);
2871 
2872 	chan->expected_tx_seq = chan->buffer_seq;
2873 	l2cap_seq_list_clear(&chan->srej_list);
2874 	skb_queue_purge(&chan->srej_q);
2875 	chan->rx_state = L2CAP_RX_STATE_RECV;
2876 }
2877 
2878 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2879 				struct l2cap_ctrl *control,
2880 				struct sk_buff_head *skbs, u8 event)
2881 {
2882 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2883 	       event);
2884 
2885 	switch (event) {
2886 	case L2CAP_EV_DATA_REQUEST:
2887 		if (chan->tx_send_head == NULL)
2888 			chan->tx_send_head = skb_peek(skbs);
2889 
2890 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2891 		l2cap_ertm_send(chan);
2892 		break;
2893 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2894 		BT_DBG("Enter LOCAL_BUSY");
2895 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2896 
2897 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2898 			/* The SREJ_SENT state must be aborted if we are to
2899 			 * enter the LOCAL_BUSY state.
2900 			 */
2901 			l2cap_abort_rx_srej_sent(chan);
2902 		}
2903 
2904 		l2cap_send_ack(chan);
2905 
2906 		break;
2907 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2908 		BT_DBG("Exit LOCAL_BUSY");
2909 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2910 
2911 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2912 			struct l2cap_ctrl local_control;
2913 
2914 			memset(&local_control, 0, sizeof(local_control));
2915 			local_control.sframe = 1;
2916 			local_control.super = L2CAP_SUPER_RR;
2917 			local_control.poll = 1;
2918 			local_control.reqseq = chan->buffer_seq;
2919 			l2cap_send_sframe(chan, &local_control);
2920 
2921 			chan->retry_count = 1;
2922 			__set_monitor_timer(chan);
2923 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 		}
2925 		break;
2926 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2927 		l2cap_process_reqseq(chan, control->reqseq);
2928 		break;
2929 	case L2CAP_EV_EXPLICIT_POLL:
2930 		l2cap_send_rr_or_rnr(chan, 1);
2931 		chan->retry_count = 1;
2932 		__set_monitor_timer(chan);
2933 		__clear_ack_timer(chan);
2934 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2935 		break;
2936 	case L2CAP_EV_RETRANS_TO:
2937 		l2cap_send_rr_or_rnr(chan, 1);
2938 		chan->retry_count = 1;
2939 		__set_monitor_timer(chan);
2940 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2941 		break;
2942 	case L2CAP_EV_RECV_FBIT:
2943 		/* Nothing to process */
2944 		break;
2945 	default:
2946 		break;
2947 	}
2948 }
2949 
2950 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2951 				  struct l2cap_ctrl *control,
2952 				  struct sk_buff_head *skbs, u8 event)
2953 {
2954 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2955 	       event);
2956 
2957 	switch (event) {
2958 	case L2CAP_EV_DATA_REQUEST:
2959 		if (chan->tx_send_head == NULL)
2960 			chan->tx_send_head = skb_peek(skbs);
2961 		/* Queue data, but don't send. */
2962 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2963 		break;
2964 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2965 		BT_DBG("Enter LOCAL_BUSY");
2966 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2967 
2968 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2969 			/* The SREJ_SENT state must be aborted if we are to
2970 			 * enter the LOCAL_BUSY state.
2971 			 */
2972 			l2cap_abort_rx_srej_sent(chan);
2973 		}
2974 
2975 		l2cap_send_ack(chan);
2976 
2977 		break;
2978 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2979 		BT_DBG("Exit LOCAL_BUSY");
2980 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2981 
2982 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2983 			struct l2cap_ctrl local_control;
2984 			memset(&local_control, 0, sizeof(local_control));
2985 			local_control.sframe = 1;
2986 			local_control.super = L2CAP_SUPER_RR;
2987 			local_control.poll = 1;
2988 			local_control.reqseq = chan->buffer_seq;
2989 			l2cap_send_sframe(chan, &local_control);
2990 
2991 			chan->retry_count = 1;
2992 			__set_monitor_timer(chan);
2993 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2994 		}
2995 		break;
2996 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2997 		l2cap_process_reqseq(chan, control->reqseq);
2998 		fallthrough;
2999 
3000 	case L2CAP_EV_RECV_FBIT:
3001 		if (control && control->final) {
3002 			__clear_monitor_timer(chan);
3003 			if (chan->unacked_frames > 0)
3004 				__set_retrans_timer(chan);
3005 			chan->retry_count = 0;
3006 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3007 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3008 		}
3009 		break;
3010 	case L2CAP_EV_EXPLICIT_POLL:
3011 		/* Ignore */
3012 		break;
3013 	case L2CAP_EV_MONITOR_TO:
3014 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3015 			l2cap_send_rr_or_rnr(chan, 1);
3016 			__set_monitor_timer(chan);
3017 			chan->retry_count++;
3018 		} else {
3019 			l2cap_send_disconn_req(chan, ECONNABORTED);
3020 		}
3021 		break;
3022 	default:
3023 		break;
3024 	}
3025 }
3026 
3027 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3028 		     struct sk_buff_head *skbs, u8 event)
3029 {
3030 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3031 	       chan, control, skbs, event, chan->tx_state);
3032 
3033 	switch (chan->tx_state) {
3034 	case L2CAP_TX_STATE_XMIT:
3035 		l2cap_tx_state_xmit(chan, control, skbs, event);
3036 		break;
3037 	case L2CAP_TX_STATE_WAIT_F:
3038 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3039 		break;
3040 	default:
3041 		/* Ignore event */
3042 		break;
3043 	}
3044 }
3045 
3046 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3047 			     struct l2cap_ctrl *control)
3048 {
3049 	BT_DBG("chan %p, control %p", chan, control);
3050 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3051 }
3052 
3053 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3054 				  struct l2cap_ctrl *control)
3055 {
3056 	BT_DBG("chan %p, control %p", chan, control);
3057 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3058 }
3059 
3060 /* Copy frame to all raw sockets on that connection */
3061 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3062 {
3063 	struct sk_buff *nskb;
3064 	struct l2cap_chan *chan;
3065 
3066 	BT_DBG("conn %p", conn);
3067 
3068 	mutex_lock(&conn->chan_lock);
3069 
3070 	list_for_each_entry(chan, &conn->chan_l, list) {
3071 		if (chan->chan_type != L2CAP_CHAN_RAW)
3072 			continue;
3073 
3074 		/* Don't send frame to the channel it came from */
3075 		if (bt_cb(skb)->l2cap.chan == chan)
3076 			continue;
3077 
3078 		nskb = skb_clone(skb, GFP_KERNEL);
3079 		if (!nskb)
3080 			continue;
3081 		if (chan->ops->recv(chan, nskb))
3082 			kfree_skb(nskb);
3083 	}
3084 
3085 	mutex_unlock(&conn->chan_lock);
3086 }
3087 
3088 /* ---- L2CAP signalling commands ---- */
3089 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3090 				       u8 ident, u16 dlen, void *data)
3091 {
3092 	struct sk_buff *skb, **frag;
3093 	struct l2cap_cmd_hdr *cmd;
3094 	struct l2cap_hdr *lh;
3095 	int len, count;
3096 
3097 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3098 	       conn, code, ident, dlen);
3099 
3100 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3101 		return NULL;
3102 
3103 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3104 	count = min_t(unsigned int, conn->mtu, len);
3105 
3106 	skb = bt_skb_alloc(count, GFP_KERNEL);
3107 	if (!skb)
3108 		return NULL;
3109 
3110 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3111 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3112 
3113 	if (conn->hcon->type == LE_LINK)
3114 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3115 	else
3116 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3117 
3118 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3119 	cmd->code  = code;
3120 	cmd->ident = ident;
3121 	cmd->len   = cpu_to_le16(dlen);
3122 
3123 	if (dlen) {
3124 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3125 		skb_put_data(skb, data, count);
3126 		data += count;
3127 	}
3128 
3129 	len -= skb->len;
3130 
3131 	/* Continuation fragments (no L2CAP header) */
3132 	frag = &skb_shinfo(skb)->frag_list;
3133 	while (len) {
3134 		count = min_t(unsigned int, conn->mtu, len);
3135 
3136 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3137 		if (!*frag)
3138 			goto fail;
3139 
3140 		skb_put_data(*frag, data, count);
3141 
3142 		len  -= count;
3143 		data += count;
3144 
3145 		frag = &(*frag)->next;
3146 	}
3147 
3148 	return skb;
3149 
3150 fail:
3151 	kfree_skb(skb);
3152 	return NULL;
3153 }
3154 
3155 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3156 				     unsigned long *val)
3157 {
3158 	struct l2cap_conf_opt *opt = *ptr;
3159 	int len;
3160 
3161 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3162 	*ptr += len;
3163 
3164 	*type = opt->type;
3165 	*olen = opt->len;
3166 
3167 	switch (opt->len) {
3168 	case 1:
3169 		*val = *((u8 *) opt->val);
3170 		break;
3171 
3172 	case 2:
3173 		*val = get_unaligned_le16(opt->val);
3174 		break;
3175 
3176 	case 4:
3177 		*val = get_unaligned_le32(opt->val);
3178 		break;
3179 
3180 	default:
3181 		*val = (unsigned long) opt->val;
3182 		break;
3183 	}
3184 
3185 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3186 	return len;
3187 }
3188 
3189 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3190 {
3191 	struct l2cap_conf_opt *opt = *ptr;
3192 
3193 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3194 
3195 	if (size < L2CAP_CONF_OPT_SIZE + len)
3196 		return;
3197 
3198 	opt->type = type;
3199 	opt->len  = len;
3200 
3201 	switch (len) {
3202 	case 1:
3203 		*((u8 *) opt->val)  = val;
3204 		break;
3205 
3206 	case 2:
3207 		put_unaligned_le16(val, opt->val);
3208 		break;
3209 
3210 	case 4:
3211 		put_unaligned_le32(val, opt->val);
3212 		break;
3213 
3214 	default:
3215 		memcpy(opt->val, (void *) val, len);
3216 		break;
3217 	}
3218 
3219 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3220 }
3221 
3222 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3223 {
3224 	struct l2cap_conf_efs efs;
3225 
3226 	switch (chan->mode) {
3227 	case L2CAP_MODE_ERTM:
3228 		efs.id		= chan->local_id;
3229 		efs.stype	= chan->local_stype;
3230 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3231 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3232 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3233 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3234 		break;
3235 
3236 	case L2CAP_MODE_STREAMING:
3237 		efs.id		= 1;
3238 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3239 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3240 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3241 		efs.acc_lat	= 0;
3242 		efs.flush_to	= 0;
3243 		break;
3244 
3245 	default:
3246 		return;
3247 	}
3248 
3249 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3250 			   (unsigned long) &efs, size);
3251 }
3252 
3253 static void l2cap_ack_timeout(struct work_struct *work)
3254 {
3255 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3256 					       ack_timer.work);
3257 	u16 frames_to_ack;
3258 
3259 	BT_DBG("chan %p", chan);
3260 
3261 	l2cap_chan_lock(chan);
3262 
3263 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3264 				     chan->last_acked_seq);
3265 
3266 	if (frames_to_ack)
3267 		l2cap_send_rr_or_rnr(chan, 0);
3268 
3269 	l2cap_chan_unlock(chan);
3270 	l2cap_chan_put(chan);
3271 }
3272 
3273 int l2cap_ertm_init(struct l2cap_chan *chan)
3274 {
3275 	int err;
3276 
3277 	chan->next_tx_seq = 0;
3278 	chan->expected_tx_seq = 0;
3279 	chan->expected_ack_seq = 0;
3280 	chan->unacked_frames = 0;
3281 	chan->buffer_seq = 0;
3282 	chan->frames_sent = 0;
3283 	chan->last_acked_seq = 0;
3284 	chan->sdu = NULL;
3285 	chan->sdu_last_frag = NULL;
3286 	chan->sdu_len = 0;
3287 
3288 	skb_queue_head_init(&chan->tx_q);
3289 
3290 	chan->local_amp_id = AMP_ID_BREDR;
3291 	chan->move_id = AMP_ID_BREDR;
3292 	chan->move_state = L2CAP_MOVE_STABLE;
3293 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3294 
3295 	if (chan->mode != L2CAP_MODE_ERTM)
3296 		return 0;
3297 
3298 	chan->rx_state = L2CAP_RX_STATE_RECV;
3299 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3300 
3301 	skb_queue_head_init(&chan->srej_q);
3302 
3303 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3304 	if (err < 0)
3305 		return err;
3306 
3307 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3308 	if (err < 0)
3309 		l2cap_seq_list_free(&chan->srej_list);
3310 
3311 	return err;
3312 }
3313 
3314 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3315 {
3316 	switch (mode) {
3317 	case L2CAP_MODE_STREAMING:
3318 	case L2CAP_MODE_ERTM:
3319 		if (l2cap_mode_supported(mode, remote_feat_mask))
3320 			return mode;
3321 		fallthrough;
3322 	default:
3323 		return L2CAP_MODE_BASIC;
3324 	}
3325 }
3326 
3327 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3328 {
3329 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3330 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3331 }
3332 
3333 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3334 {
3335 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3336 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3337 }
3338 
3339 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3340 				      struct l2cap_conf_rfc *rfc)
3341 {
3342 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3343 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3344 
3345 		/* Class 1 devices must have ERTM timeouts
3346 		 * exceeding the Link Supervision Timeout.  The
3347 		 * default Link Supervision Timeout for AMP
3348 		 * controllers is 10 seconds.
3349 		 *
3350 		 * Class 1 devices use 0xffffffff for their
3351 		 * best-effort flush timeout, so the clamping logic
3352 		 * will result in a timeout that meets the above
3353 		 * requirement.  ERTM timeouts are 16-bit values, so
3354 		 * the maximum timeout is 65.535 seconds.
3355 		 */
3356 
3357 		/* Convert timeout to milliseconds and round */
3358 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3359 
3360 		/* This is the recommended formula for class 2 devices
3361 		 * that start ERTM timers when packets are sent to the
3362 		 * controller.
3363 		 */
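		/* Illustrative only: a best-effort flush timeout of 10000
		 * (usec) becomes 10 msec after the rounding above, giving
		 * 3 * 10 + 500 = 530 msec; very large values saturate at
		 * the 0xffff msec cap below.
		 */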
3364 		ertm_to = 3 * ertm_to + 500;
3365 
3366 		if (ertm_to > 0xffff)
3367 			ertm_to = 0xffff;
3368 
3369 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3370 		rfc->monitor_timeout = rfc->retrans_timeout;
3371 	} else {
3372 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3373 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3374 	}
3375 }
3376 
3377 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3378 {
3379 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3380 	    __l2cap_ews_supported(chan->conn)) {
3381 		/* use extended control field */
3382 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3383 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3384 	} else {
3385 		chan->tx_win = min_t(u16, chan->tx_win,
3386 				     L2CAP_DEFAULT_TX_WINDOW);
3387 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3388 	}
3389 	chan->ack_win = chan->tx_win;
3390 }
3391 
3392 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3393 {
3394 	struct hci_conn *conn = chan->conn->hcon;
3395 
3396 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3397 
3398 	/* The 2-DH1 packet has between 2 and 56 information bytes
3399 	 * (including the 2-byte payload header)
3400 	 */
3401 	if (!(conn->pkt_type & HCI_2DH1))
3402 		chan->imtu = 54;
3403 
3404 	/* The 3-DH1 packet has between 2 and 85 information bytes
3405 	 * (including the 2-byte payload header)
3406 	 */
3407 	if (!(conn->pkt_type & HCI_3DH1))
3408 		chan->imtu = 83;
3409 
3410 	/* The 2-DH3 packet has between 2 and 369 information bytes
3411 	 * (including the 2-byte payload header)
3412 	 */
3413 	if (!(conn->pkt_type & HCI_2DH3))
3414 		chan->imtu = 367;
3415 
3416 	/* The 3-DH3 packet has between 2 and 554 information bytes
3417 	 * (including the 2-byte payload header)
3418 	 */
3419 	if (!(conn->pkt_type & HCI_3DH3))
3420 		chan->imtu = 552;
3421 
3422 	/* The 2-DH5 packet has between 2 and 681 information bytes
3423 	 * (including the 2-byte payload header)
3424 	 */
3425 	if (!(conn->pkt_type & HCI_2DH5))
3426 		chan->imtu = 679;
3427 
3428 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3429 	 * (including the 2-byte payload header)
3430 	 */
3431 	if (!(conn->pkt_type & HCI_3DH5))
3432 		chan->imtu = 1021;
3433 }
3434 
3435 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3436 {
3437 	struct l2cap_conf_req *req = data;
3438 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3439 	void *ptr = req->data;
3440 	void *endptr = data + data_size;
3441 	u16 size;
3442 
3443 	BT_DBG("chan %p", chan);
3444 
3445 	if (chan->num_conf_req || chan->num_conf_rsp)
3446 		goto done;
3447 
3448 	switch (chan->mode) {
3449 	case L2CAP_MODE_STREAMING:
3450 	case L2CAP_MODE_ERTM:
3451 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3452 			break;
3453 
3454 		if (__l2cap_efs_supported(chan->conn))
3455 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3456 
3457 		fallthrough;
3458 	default:
3459 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3460 		break;
3461 	}
3462 
3463 done:
3464 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3465 		if (!chan->imtu)
3466 			l2cap_mtu_auto(chan);
3467 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3468 				   endptr - ptr);
3469 	}
3470 
3471 	switch (chan->mode) {
3472 	case L2CAP_MODE_BASIC:
3473 		if (disable_ertm)
3474 			break;
3475 
3476 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3477 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3478 			break;
3479 
3480 		rfc.mode            = L2CAP_MODE_BASIC;
3481 		rfc.txwin_size      = 0;
3482 		rfc.max_transmit    = 0;
3483 		rfc.retrans_timeout = 0;
3484 		rfc.monitor_timeout = 0;
3485 		rfc.max_pdu_size    = 0;
3486 
3487 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3488 				   (unsigned long) &rfc, endptr - ptr);
3489 		break;
3490 
3491 	case L2CAP_MODE_ERTM:
3492 		rfc.mode            = L2CAP_MODE_ERTM;
3493 		rfc.max_transmit    = chan->max_tx;
3494 
3495 		__l2cap_set_ertm_timeouts(chan, &rfc);
3496 
3497 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3498 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3499 			     L2CAP_FCS_SIZE);
3500 		rfc.max_pdu_size = cpu_to_le16(size);
3501 
3502 		l2cap_txwin_setup(chan);
3503 
3504 		rfc.txwin_size = min_t(u16, chan->tx_win,
3505 				       L2CAP_DEFAULT_TX_WINDOW);
3506 
3507 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3508 				   (unsigned long) &rfc, endptr - ptr);
3509 
3510 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3511 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3512 
3513 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3514 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3515 					   chan->tx_win, endptr - ptr);
3516 
3517 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3518 			if (chan->fcs == L2CAP_FCS_NONE ||
3519 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3520 				chan->fcs = L2CAP_FCS_NONE;
3521 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3522 						   chan->fcs, endptr - ptr);
3523 			}
3524 		break;
3525 
3526 	case L2CAP_MODE_STREAMING:
3527 		l2cap_txwin_setup(chan);
3528 		rfc.mode            = L2CAP_MODE_STREAMING;
3529 		rfc.txwin_size      = 0;
3530 		rfc.max_transmit    = 0;
3531 		rfc.retrans_timeout = 0;
3532 		rfc.monitor_timeout = 0;
3533 
3534 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3535 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3536 			     L2CAP_FCS_SIZE);
3537 		rfc.max_pdu_size = cpu_to_le16(size);
3538 
3539 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3540 				   (unsigned long) &rfc, endptr - ptr);
3541 
3542 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3543 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3544 
3545 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3546 			if (chan->fcs == L2CAP_FCS_NONE ||
3547 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3548 				chan->fcs = L2CAP_FCS_NONE;
3549 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3550 						   chan->fcs, endptr - ptr);
3551 			}
3552 		break;
3553 	}
3554 
3555 	req->dcid  = cpu_to_le16(chan->dcid);
3556 	req->flags = cpu_to_le16(0);
3557 
3558 	return ptr - data;
3559 }
3560 
3561 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3562 {
3563 	struct l2cap_conf_rsp *rsp = data;
3564 	void *ptr = rsp->data;
3565 	void *endptr = data + data_size;
3566 	void *req = chan->conf_req;
3567 	int len = chan->conf_len;
3568 	int type, hint, olen;
3569 	unsigned long val;
3570 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3571 	struct l2cap_conf_efs efs;
3572 	u8 remote_efs = 0;
3573 	u16 mtu = L2CAP_DEFAULT_MTU;
3574 	u16 result = L2CAP_CONF_SUCCESS;
3575 	u16 size;
3576 
3577 	BT_DBG("chan %p", chan);
3578 
3579 	while (len >= L2CAP_CONF_OPT_SIZE) {
3580 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3581 		if (len < 0)
3582 			break;
3583 
3584 		hint  = type & L2CAP_CONF_HINT;
3585 		type &= L2CAP_CONF_MASK;
3586 
3587 		switch (type) {
3588 		case L2CAP_CONF_MTU:
3589 			if (olen != 2)
3590 				break;
3591 			mtu = val;
3592 			break;
3593 
3594 		case L2CAP_CONF_FLUSH_TO:
3595 			if (olen != 2)
3596 				break;
3597 			chan->flush_to = val;
3598 			break;
3599 
3600 		case L2CAP_CONF_QOS:
3601 			break;
3602 
3603 		case L2CAP_CONF_RFC:
3604 			if (olen != sizeof(rfc))
3605 				break;
3606 			memcpy(&rfc, (void *) val, olen);
3607 			break;
3608 
3609 		case L2CAP_CONF_FCS:
3610 			if (olen != 1)
3611 				break;
3612 			if (val == L2CAP_FCS_NONE)
3613 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3614 			break;
3615 
3616 		case L2CAP_CONF_EFS:
3617 			if (olen != sizeof(efs))
3618 				break;
3619 			remote_efs = 1;
3620 			memcpy(&efs, (void *) val, olen);
3621 			break;
3622 
3623 		case L2CAP_CONF_EWS:
3624 			if (olen != 2)
3625 				break;
3626 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3627 				return -ECONNREFUSED;
3628 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3629 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3630 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3631 			chan->remote_tx_win = val;
3632 			break;
3633 
3634 		default:
3635 			if (hint)
3636 				break;
3637 			result = L2CAP_CONF_UNKNOWN;
3638 			*((u8 *) ptr++) = type;
3639 			break;
3640 		}
3641 	}
3642 
3643 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3644 		goto done;
3645 
3646 	switch (chan->mode) {
3647 	case L2CAP_MODE_STREAMING:
3648 	case L2CAP_MODE_ERTM:
3649 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3650 			chan->mode = l2cap_select_mode(rfc.mode,
3651 						       chan->conn->feat_mask);
3652 			break;
3653 		}
3654 
3655 		if (remote_efs) {
3656 			if (__l2cap_efs_supported(chan->conn))
3657 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3658 			else
3659 				return -ECONNREFUSED;
3660 		}
3661 
3662 		if (chan->mode != rfc.mode)
3663 			return -ECONNREFUSED;
3664 
3665 		break;
3666 	}
3667 
3668 done:
3669 	if (chan->mode != rfc.mode) {
3670 		result = L2CAP_CONF_UNACCEPT;
3671 		rfc.mode = chan->mode;
3672 
3673 		if (chan->num_conf_rsp == 1)
3674 			return -ECONNREFUSED;
3675 
3676 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3677 				   (unsigned long) &rfc, endptr - ptr);
3678 	}
3679 
3680 	if (result == L2CAP_CONF_SUCCESS) {
3681 		/* Configure output options and let the other side know
3682 		 * which ones we don't like. */
3683 
3684 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3685 			result = L2CAP_CONF_UNACCEPT;
3686 		else {
3687 			chan->omtu = mtu;
3688 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3689 		}
3690 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3691 
3692 		if (remote_efs) {
3693 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3694 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3695 			    efs.stype != chan->local_stype) {
3696 
3697 				result = L2CAP_CONF_UNACCEPT;
3698 
3699 				if (chan->num_conf_req >= 1)
3700 					return -ECONNREFUSED;
3701 
3702 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3703 						   sizeof(efs),
3704 						   (unsigned long) &efs, endptr - ptr);
3705 			} else {
3706 				/* Send PENDING Conf Rsp */
3707 				result = L2CAP_CONF_PENDING;
3708 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3709 			}
3710 		}
3711 
3712 		switch (rfc.mode) {
3713 		case L2CAP_MODE_BASIC:
3714 			chan->fcs = L2CAP_FCS_NONE;
3715 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3716 			break;
3717 
3718 		case L2CAP_MODE_ERTM:
3719 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3720 				chan->remote_tx_win = rfc.txwin_size;
3721 			else
3722 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3723 
3724 			chan->remote_max_tx = rfc.max_transmit;
3725 
3726 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3727 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3728 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3729 			rfc.max_pdu_size = cpu_to_le16(size);
3730 			chan->remote_mps = size;
3731 
3732 			__l2cap_set_ertm_timeouts(chan, &rfc);
3733 
3734 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3735 
3736 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3737 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3738 
3739 			if (remote_efs &&
3740 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3741 				chan->remote_id = efs.id;
3742 				chan->remote_stype = efs.stype;
3743 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3744 				chan->remote_flush_to =
3745 					le32_to_cpu(efs.flush_to);
3746 				chan->remote_acc_lat =
3747 					le32_to_cpu(efs.acc_lat);
3748 				chan->remote_sdu_itime =
3749 					le32_to_cpu(efs.sdu_itime);
3750 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3751 						   sizeof(efs),
3752 						   (unsigned long) &efs, endptr - ptr);
3753 			}
3754 			break;
3755 
3756 		case L2CAP_MODE_STREAMING:
3757 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3758 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3759 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3760 			rfc.max_pdu_size = cpu_to_le16(size);
3761 			chan->remote_mps = size;
3762 
3763 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3764 
3765 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 					   (unsigned long) &rfc, endptr - ptr);
3767 
3768 			break;
3769 
3770 		default:
3771 			result = L2CAP_CONF_UNACCEPT;
3772 
3773 			memset(&rfc, 0, sizeof(rfc));
3774 			rfc.mode = chan->mode;
3775 		}
3776 
3777 		if (result == L2CAP_CONF_SUCCESS)
3778 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3779 	}
3780 	rsp->scid   = cpu_to_le16(chan->dcid);
3781 	rsp->result = cpu_to_le16(result);
3782 	rsp->flags  = cpu_to_le16(0);
3783 
3784 	return ptr - data;
3785 }
3786 
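/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request in @data.  Returns the length of the new request,
 * or -ECONNREFUSED if the proposed options cannot be accepted.
 */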
3787 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3788 				void *data, size_t size, u16 *result)
3789 {
3790 	struct l2cap_conf_req *req = data;
3791 	void *ptr = req->data;
3792 	void *endptr = data + size;
3793 	int type, olen;
3794 	unsigned long val;
3795 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3796 	struct l2cap_conf_efs efs;
3797 
3798 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3799 
3800 	while (len >= L2CAP_CONF_OPT_SIZE) {
3801 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3802 		if (len < 0)
3803 			break;
3804 
3805 		switch (type) {
3806 		case L2CAP_CONF_MTU:
3807 			if (olen != 2)
3808 				break;
3809 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3810 				*result = L2CAP_CONF_UNACCEPT;
3811 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3812 			} else
3813 				chan->imtu = val;
3814 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3815 					   endptr - ptr);
3816 			break;
3817 
3818 		case L2CAP_CONF_FLUSH_TO:
3819 			if (olen != 2)
3820 				break;
3821 			chan->flush_to = val;
3822 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3823 					   chan->flush_to, endptr - ptr);
3824 			break;
3825 
3826 		case L2CAP_CONF_RFC:
3827 			if (olen != sizeof(rfc))
3828 				break;
3829 			memcpy(&rfc, (void *)val, olen);
3830 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3831 			    rfc.mode != chan->mode)
3832 				return -ECONNREFUSED;
3833 			chan->fcs = 0;
3834 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3835 					   (unsigned long) &rfc, endptr - ptr);
3836 			break;
3837 
3838 		case L2CAP_CONF_EWS:
3839 			if (olen != 2)
3840 				break;
3841 			chan->ack_win = min_t(u16, val, chan->ack_win);
3842 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3843 					   chan->tx_win, endptr - ptr);
3844 			break;
3845 
3846 		case L2CAP_CONF_EFS:
3847 			if (olen != sizeof(efs))
3848 				break;
3849 			memcpy(&efs, (void *)val, olen);
3850 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3851 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3852 			    efs.stype != chan->local_stype)
3853 				return -ECONNREFUSED;
3854 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3855 					   (unsigned long) &efs, endptr - ptr);
3856 			break;
3857 
3858 		case L2CAP_CONF_FCS:
3859 			if (olen != 1)
3860 				break;
3861 			if (*result == L2CAP_CONF_PENDING)
3862 				if (val == L2CAP_FCS_NONE)
3863 					set_bit(CONF_RECV_NO_FCS,
3864 						&chan->conf_state);
3865 			break;
3866 		}
3867 	}
3868 
3869 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3870 		return -ECONNREFUSED;
3871 
3872 	chan->mode = rfc.mode;
3873 
3874 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3875 		switch (rfc.mode) {
3876 		case L2CAP_MODE_ERTM:
3877 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3880 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3881 				chan->ack_win = min_t(u16, chan->ack_win,
3882 						      rfc.txwin_size);
3883 
3884 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3885 				chan->local_msdu = le16_to_cpu(efs.msdu);
3886 				chan->local_sdu_itime =
3887 					le32_to_cpu(efs.sdu_itime);
3888 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3889 				chan->local_flush_to =
3890 					le32_to_cpu(efs.flush_to);
3891 			}
3892 			break;
3893 
3894 		case L2CAP_MODE_STREAMING:
3895 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3896 		}
3897 	}
3898 
3899 	req->dcid   = cpu_to_le16(chan->dcid);
3900 	req->flags  = cpu_to_le16(0);
3901 
3902 	return ptr - data;
3903 }
3904 
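/* Build a Configuration Response carrying only the given result and flags,
 * with no options; returns its length.
 */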
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 				u16 result, u16 flags)
3907 {
3908 	struct l2cap_conf_rsp *rsp = data;
3909 	void *ptr = rsp->data;
3910 
3911 	BT_DBG("chan %p", chan);
3912 
3913 	rsp->scid   = cpu_to_le16(chan->dcid);
3914 	rsp->result = cpu_to_le16(result);
3915 	rsp->flags  = cpu_to_le16(flags);
3916 
3917 	return ptr - data;
3918 }
3919 
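/* Send the LE Credit Based Connection Response that was deferred while the
 * channel was waiting to be accepted, reusing the identifier saved from the
 * original request.
 */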
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3921 {
3922 	struct l2cap_le_conn_rsp rsp;
3923 	struct l2cap_conn *conn = chan->conn;
3924 
3925 	BT_DBG("chan %p", chan);
3926 
3927 	rsp.dcid    = cpu_to_le16(chan->scid);
3928 	rsp.mtu     = cpu_to_le16(chan->imtu);
3929 	rsp.mps     = cpu_to_le16(chan->mps);
3930 	rsp.credits = cpu_to_le16(chan->rx_credits);
3931 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3932 
3933 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3934 		       &rsp);
3935 }
3936 
3937 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3938 {
3939 	int *result = data;
3940 
3941 	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3942 		return;
3943 
3944 	switch (chan->state) {
3945 	case BT_CONNECT2:
3946 		/* If the channel is still pending accept, add it to the count */
3947 		(*result)++;
3948 		return;
3949 	case BT_CONNECTED:
3950 		return;
3951 	default:
3952 		/* If not connected or pending accept, it has been refused */
3953 		*result = -ECONNREFUSED;
3954 		return;
3955 	}
3956 }
3957 
3958 struct l2cap_ecred_rsp_data {
3959 	struct {
3960 		struct l2cap_ecred_conn_rsp rsp;
3961 		__le16 scid[L2CAP_ECRED_MAX_CID];
3962 	} __packed pdu;
3963 	int count;
3964 };
3965 
3966 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3967 {
3968 	struct l2cap_ecred_rsp_data *rsp = data;
3969 
3970 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3971 		return;
3972 
3973 	/* Reset ident so only one response is sent */
3974 	chan->ident = 0;
3975 
3976 	/* Include all channels pending with the same ident */
3977 	if (!rsp->pdu.rsp.result)
3978 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3979 	else
3980 		l2cap_chan_del(chan, ECONNRESET);
3981 }
3982 
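/* Send a deferred Enhanced Credit Based Connection Response covering every
 * channel created by the request with this identifier, once all of them are
 * either accepted or refused.
 */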
3983 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3984 {
3985 	struct l2cap_conn *conn = chan->conn;
3986 	struct l2cap_ecred_rsp_data data;
3987 	u16 id = chan->ident;
3988 	int result = 0;
3989 
3990 	if (!id)
3991 		return;
3992 
3993 	BT_DBG("chan %p id %d", chan, id);
3994 
3995 	memset(&data, 0, sizeof(data));
3996 
3997 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3998 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3999 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4000 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4001 
4002 	/* Verify that all channels are ready */
4003 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4004 
4005 	if (result > 0)
4006 		return;
4007 
4008 	if (result < 0)
4009 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4010 
4011 	/* Build response */
4012 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4013 
4014 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4015 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
4016 		       &data.pdu);
4017 }
4018 
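/* Send a deferred Connection Response (or Create Channel Response when the
 * channel is on a high-speed link) and, if not already done, start
 * configuration by sending the first Configuration Request.
 */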
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4020 {
4021 	struct l2cap_conn_rsp rsp;
4022 	struct l2cap_conn *conn = chan->conn;
4023 	u8 buf[128];
4024 	u8 rsp_code;
4025 
4026 	rsp.scid   = cpu_to_le16(chan->dcid);
4027 	rsp.dcid   = cpu_to_le16(chan->scid);
4028 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4030 
4031 	if (chan->hs_hcon)
4032 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4033 	else
4034 		rsp_code = L2CAP_CONN_RSP;
4035 
4036 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4037 
4038 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4039 
4040 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4041 		return;
4042 
4043 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 	chan->num_conf_req++;
4046 }
4047 
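/* Extract the RFC and extended window size options from a Configuration
 * Response and update the channel's ERTM/streaming parameters, falling back
 * to sane defaults if the remote omitted them.
 */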
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4049 {
4050 	int type, olen;
4051 	unsigned long val;
4052 	/* Use sane default values in case a misbehaving remote device
4053 	 * did not send an RFC or extended window size option.
4054 	 */
4055 	u16 txwin_ext = chan->ack_win;
4056 	struct l2cap_conf_rfc rfc = {
4057 		.mode = chan->mode,
4058 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 		.max_pdu_size = cpu_to_le16(chan->imtu),
4061 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4062 	};
4063 
4064 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4065 
4066 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4067 		return;
4068 
4069 	while (len >= L2CAP_CONF_OPT_SIZE) {
4070 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4071 		if (len < 0)
4072 			break;
4073 
4074 		switch (type) {
4075 		case L2CAP_CONF_RFC:
4076 			if (olen != sizeof(rfc))
4077 				break;
4078 			memcpy(&rfc, (void *)val, olen);
4079 			break;
4080 		case L2CAP_CONF_EWS:
4081 			if (olen != 2)
4082 				break;
4083 			txwin_ext = val;
4084 			break;
4085 		}
4086 	}
4087 
4088 	switch (rfc.mode) {
4089 	case L2CAP_MODE_ERTM:
4090 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4095 		else
4096 			chan->ack_win = min_t(u16, chan->ack_win,
4097 					      rfc.txwin_size);
4098 		break;
4099 	case L2CAP_MODE_STREAMING:
4100 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4101 	}
4102 }
4103 
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4106 				    u8 *data)
4107 {
4108 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4109 
4110 	if (cmd_len < sizeof(*rej))
4111 		return -EPROTO;
4112 
4113 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4114 		return 0;
4115 
4116 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 	    cmd->ident == conn->info_ident) {
4118 		cancel_delayed_work(&conn->info_timer);
4119 
4120 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 		conn->info_ident = 0;
4122 
4123 		l2cap_conn_start(conn);
4124 	}
4125 
4126 	return 0;
4127 }
4128 
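/* Handle an incoming Connection Request (or Create Channel Request when an
 * AMP id is given): look up a listening channel for the PSM, check link
 * security and the source CID, create the new channel and send the response.
 */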
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 					struct l2cap_cmd_hdr *cmd,
4131 					u8 *data, u8 rsp_code, u8 amp_id)
4132 {
4133 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 	struct l2cap_conn_rsp rsp;
4135 	struct l2cap_chan *chan = NULL, *pchan;
4136 	int result, status = L2CAP_CS_NO_INFO;
4137 
4138 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 	__le16 psm = req->psm;
4140 
4141 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4142 
4143 	/* Check if we have a socket listening on this PSM */
4144 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 					 &conn->hcon->dst, ACL_LINK);
4146 	if (!pchan) {
4147 		result = L2CAP_CR_BAD_PSM;
4148 		goto sendresp;
4149 	}
4150 
4151 	mutex_lock(&conn->chan_lock);
4152 	l2cap_chan_lock(pchan);
4153 
4154 	/* Check if the ACL is secure enough (if not SDP) */
4155 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 	    !hci_conn_check_link_mode(conn->hcon)) {
4157 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 		result = L2CAP_CR_SEC_BLOCK;
4159 		goto response;
4160 	}
4161 
4162 	result = L2CAP_CR_NO_MEM;
4163 
4164 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4165 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 		result = L2CAP_CR_INVALID_SCID;
4167 		goto response;
4168 	}
4169 
4170 	/* Check if we already have a channel with that dcid */
4171 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 		result = L2CAP_CR_SCID_IN_USE;
4173 		goto response;
4174 	}
4175 
4176 	chan = pchan->ops->new_connection(pchan);
4177 	if (!chan)
4178 		goto response;
4179 
4180 	/* For certain devices (ex: HID mouse), support for authentication,
4181 	 * pairing and bonding is optional. For such devices, in order to avoid
4182 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4183 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4184 	 */
4185 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4186 
4187 	bacpy(&chan->src, &conn->hcon->src);
4188 	bacpy(&chan->dst, &conn->hcon->dst);
4189 	chan->src_type = bdaddr_src_type(conn->hcon);
4190 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4191 	chan->psm  = psm;
4192 	chan->dcid = scid;
4193 	chan->local_amp_id = amp_id;
4194 
4195 	__l2cap_chan_add(conn, chan);
4196 
4197 	dcid = chan->scid;
4198 
4199 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4200 
4201 	chan->ident = cmd->ident;
4202 
4203 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 		if (l2cap_chan_check_security(chan, false)) {
4205 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 				l2cap_state_change(chan, BT_CONNECT2);
4207 				result = L2CAP_CR_PEND;
4208 				status = L2CAP_CS_AUTHOR_PEND;
4209 				chan->ops->defer(chan);
4210 			} else {
4211 				/* Force pending result for AMP controllers.
4212 				 * The connection will succeed after the
4213 				 * physical link is up.
4214 				 */
4215 				if (amp_id == AMP_ID_BREDR) {
4216 					l2cap_state_change(chan, BT_CONFIG);
4217 					result = L2CAP_CR_SUCCESS;
4218 				} else {
4219 					l2cap_state_change(chan, BT_CONNECT2);
4220 					result = L2CAP_CR_PEND;
4221 				}
4222 				status = L2CAP_CS_NO_INFO;
4223 			}
4224 		} else {
4225 			l2cap_state_change(chan, BT_CONNECT2);
4226 			result = L2CAP_CR_PEND;
4227 			status = L2CAP_CS_AUTHEN_PEND;
4228 		}
4229 	} else {
4230 		l2cap_state_change(chan, BT_CONNECT2);
4231 		result = L2CAP_CR_PEND;
4232 		status = L2CAP_CS_NO_INFO;
4233 	}
4234 
4235 response:
4236 	l2cap_chan_unlock(pchan);
4237 	mutex_unlock(&conn->chan_lock);
4238 	l2cap_chan_put(pchan);
4239 
4240 sendresp:
4241 	rsp.scid   = cpu_to_le16(scid);
4242 	rsp.dcid   = cpu_to_le16(dcid);
4243 	rsp.result = cpu_to_le16(result);
4244 	rsp.status = cpu_to_le16(status);
4245 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4246 
4247 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 		struct l2cap_info_req info;
4249 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4250 
4251 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 		conn->info_ident = l2cap_get_ident(conn);
4253 
4254 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4255 
4256 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 			       sizeof(info), &info);
4258 	}
4259 
4260 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 	    result == L2CAP_CR_SUCCESS) {
4262 		u8 buf[128];
4263 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 		chan->num_conf_req++;
4267 	}
4268 
4269 	return chan;
4270 }
4271 
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4274 {
4275 	if (cmd_len < sizeof(struct l2cap_conn_req))
4276 		return -EPROTO;
4277 
4278 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4279 	return 0;
4280 }
4281 
4282 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4283 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4284 				    u8 *data)
4285 {
4286 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4287 	u16 scid, dcid, result, status;
4288 	struct l2cap_chan *chan;
4289 	u8 req[128];
4290 	int err;
4291 
4292 	if (cmd_len < sizeof(*rsp))
4293 		return -EPROTO;
4294 
4295 	scid   = __le16_to_cpu(rsp->scid);
4296 	dcid   = __le16_to_cpu(rsp->dcid);
4297 	result = __le16_to_cpu(rsp->result);
4298 	status = __le16_to_cpu(rsp->status);
4299 
4300 	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4301 					   dcid > L2CAP_CID_DYN_END))
4302 		return -EPROTO;
4303 
4304 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4305 	       dcid, scid, result, status);
4306 
4307 	mutex_lock(&conn->chan_lock);
4308 
4309 	if (scid) {
4310 		chan = __l2cap_get_chan_by_scid(conn, scid);
4311 		if (!chan) {
4312 			err = -EBADSLT;
4313 			goto unlock;
4314 		}
4315 	} else {
4316 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4317 		if (!chan) {
4318 			err = -EBADSLT;
4319 			goto unlock;
4320 		}
4321 	}
4322 
4323 	chan = l2cap_chan_hold_unless_zero(chan);
4324 	if (!chan) {
4325 		err = -EBADSLT;
4326 		goto unlock;
4327 	}
4328 
4329 	err = 0;
4330 
4331 	l2cap_chan_lock(chan);
4332 
4333 	switch (result) {
4334 	case L2CAP_CR_SUCCESS:
4335 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4336 			err = -EBADSLT;
4337 			break;
4338 		}
4339 
4340 		l2cap_state_change(chan, BT_CONFIG);
4341 		chan->ident = 0;
4342 		chan->dcid = dcid;
4343 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4344 
4345 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4346 			break;
4347 
4348 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4349 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4350 		chan->num_conf_req++;
4351 		break;
4352 
4353 	case L2CAP_CR_PEND:
4354 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4355 		break;
4356 
4357 	default:
4358 		l2cap_chan_del(chan, ECONNREFUSED);
4359 		break;
4360 	}
4361 
4362 	l2cap_chan_unlock(chan);
4363 	l2cap_chan_put(chan);
4364 
4365 unlock:
4366 	mutex_unlock(&conn->chan_lock);
4367 
4368 	return err;
4369 }
4370 
4371 static inline void set_default_fcs(struct l2cap_chan *chan)
4372 {
4373 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4374 	 * sides request it.
4375 	 */
4376 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4377 		chan->fcs = L2CAP_FCS_NONE;
4378 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4379 		chan->fcs = L2CAP_FCS_CRC16;
4380 }
4381 
4382 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4383 				    u8 ident, u16 flags)
4384 {
4385 	struct l2cap_conn *conn = chan->conn;
4386 
4387 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4388 	       flags);
4389 
4390 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4391 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4392 
4393 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4394 		       l2cap_build_conf_rsp(chan, data,
4395 					    L2CAP_CONF_SUCCESS, flags), data);
4396 }
4397 
4398 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4399 				   u16 scid, u16 dcid)
4400 {
4401 	struct l2cap_cmd_rej_cid rej;
4402 
4403 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4404 	rej.scid = __cpu_to_le16(scid);
4405 	rej.dcid = __cpu_to_le16(dcid);
4406 
4407 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4408 }
4409 
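/* Handle an incoming Configuration Request: accumulate options across
 * continuation fragments, parse the complete request and send the
 * Configuration Response.
 */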
4410 static inline int l2cap_config_req(struct l2cap_conn *conn,
4411 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4412 				   u8 *data)
4413 {
4414 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4415 	u16 dcid, flags;
4416 	u8 rsp[64];
4417 	struct l2cap_chan *chan;
4418 	int len, err = 0;
4419 
4420 	if (cmd_len < sizeof(*req))
4421 		return -EPROTO;
4422 
4423 	dcid  = __le16_to_cpu(req->dcid);
4424 	flags = __le16_to_cpu(req->flags);
4425 
4426 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4427 
4428 	chan = l2cap_get_chan_by_scid(conn, dcid);
4429 	if (!chan) {
4430 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4431 		return 0;
4432 	}
4433 
4434 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4435 	    chan->state != BT_CONNECTED) {
4436 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4437 				       chan->dcid);
4438 		goto unlock;
4439 	}
4440 
4441 	/* Reject if config buffer is too small. */
4442 	len = cmd_len - sizeof(*req);
4443 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4444 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4445 			       l2cap_build_conf_rsp(chan, rsp,
4446 			       L2CAP_CONF_REJECT, flags), rsp);
4447 		goto unlock;
4448 	}
4449 
4450 	/* Store config. */
4451 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4452 	chan->conf_len += len;
4453 
4454 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4455 		/* Incomplete config. Send empty response. */
4456 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4457 			       l2cap_build_conf_rsp(chan, rsp,
4458 			       L2CAP_CONF_SUCCESS, flags), rsp);
4459 		goto unlock;
4460 	}
4461 
4462 	/* Complete config. */
4463 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4464 	if (len < 0) {
4465 		l2cap_send_disconn_req(chan, ECONNRESET);
4466 		goto unlock;
4467 	}
4468 
4469 	chan->ident = cmd->ident;
4470 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4471 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4472 		chan->num_conf_rsp++;
4473 
4474 	/* Reset config buffer. */
4475 	chan->conf_len = 0;
4476 
4477 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4478 		goto unlock;
4479 
4480 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4481 		set_default_fcs(chan);
4482 
4483 		if (chan->mode == L2CAP_MODE_ERTM ||
4484 		    chan->mode == L2CAP_MODE_STREAMING)
4485 			err = l2cap_ertm_init(chan);
4486 
4487 		if (err < 0)
4488 			l2cap_send_disconn_req(chan, -err);
4489 		else
4490 			l2cap_chan_ready(chan);
4491 
4492 		goto unlock;
4493 	}
4494 
4495 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4496 		u8 buf[64];
4497 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4498 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4499 		chan->num_conf_req++;
4500 	}
4501 
4502 	/* Got Conf Rsp PENDING from remote side and assume we sent
4503 	 * Conf Rsp PENDING in the code above */
4504 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4505 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4506 
4507 		/* check compatibility */
4508 
4509 		/* Send rsp for BR/EDR channel */
4510 		if (!chan->hs_hcon)
4511 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4512 		else
4513 			chan->ident = cmd->ident;
4514 	}
4515 
4516 unlock:
4517 	l2cap_chan_unlock(chan);
4518 	l2cap_chan_put(chan);
4519 	return err;
4520 }
4521 
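/* Handle an incoming Configuration Response: finish configuration on
 * success, renegotiate on PENDING or UNACCEPT, and disconnect the channel
 * otherwise.
 */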
4522 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4523 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4524 				   u8 *data)
4525 {
4526 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4527 	u16 scid, flags, result;
4528 	struct l2cap_chan *chan;
4529 	int len = cmd_len - sizeof(*rsp);
4530 	int err = 0;
4531 
4532 	if (cmd_len < sizeof(*rsp))
4533 		return -EPROTO;
4534 
4535 	scid   = __le16_to_cpu(rsp->scid);
4536 	flags  = __le16_to_cpu(rsp->flags);
4537 	result = __le16_to_cpu(rsp->result);
4538 
4539 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4540 	       result, len);
4541 
4542 	chan = l2cap_get_chan_by_scid(conn, scid);
4543 	if (!chan)
4544 		return 0;
4545 
4546 	switch (result) {
4547 	case L2CAP_CONF_SUCCESS:
4548 		l2cap_conf_rfc_get(chan, rsp->data, len);
4549 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4550 		break;
4551 
4552 	case L2CAP_CONF_PENDING:
4553 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4554 
4555 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4556 			char buf[64];
4557 
4558 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4559 						   buf, sizeof(buf), &result);
4560 			if (len < 0) {
4561 				l2cap_send_disconn_req(chan, ECONNRESET);
4562 				goto done;
4563 			}
4564 
4565 			if (!chan->hs_hcon) {
4566 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4567 							0);
4568 			} else {
4569 				if (l2cap_check_efs(chan)) {
4570 					amp_create_logical_link(chan);
4571 					chan->ident = cmd->ident;
4572 				}
4573 			}
4574 		}
4575 		goto done;
4576 
4577 	case L2CAP_CONF_UNACCEPT:
4578 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4579 			char req[64];
4580 
4581 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4582 				l2cap_send_disconn_req(chan, ECONNRESET);
4583 				goto done;
4584 			}
4585 
4586 			/* throw out any old stored conf requests */
4587 			result = L2CAP_CONF_SUCCESS;
4588 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4589 						   req, sizeof(req), &result);
4590 			if (len < 0) {
4591 				l2cap_send_disconn_req(chan, ECONNRESET);
4592 				goto done;
4593 			}
4594 
4595 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4596 				       L2CAP_CONF_REQ, len, req);
4597 			chan->num_conf_req++;
4598 			if (result != L2CAP_CONF_SUCCESS)
4599 				goto done;
4600 			break;
4601 		}
4602 		fallthrough;
4603 
4604 	default:
4605 		l2cap_chan_set_err(chan, ECONNRESET);
4606 
4607 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4608 		l2cap_send_disconn_req(chan, ECONNRESET);
4609 		goto done;
4610 	}
4611 
4612 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4613 		goto done;
4614 
4615 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4616 
4617 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4618 		set_default_fcs(chan);
4619 
4620 		if (chan->mode == L2CAP_MODE_ERTM ||
4621 		    chan->mode == L2CAP_MODE_STREAMING)
4622 			err = l2cap_ertm_init(chan);
4623 
4624 		if (err < 0)
4625 			l2cap_send_disconn_req(chan, -err);
4626 		else
4627 			l2cap_chan_ready(chan);
4628 	}
4629 
4630 done:
4631 	l2cap_chan_unlock(chan);
4632 	l2cap_chan_put(chan);
4633 	return err;
4634 }
4635 
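/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the channel.
 */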
4636 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4637 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4638 				       u8 *data)
4639 {
4640 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4641 	struct l2cap_disconn_rsp rsp;
4642 	u16 dcid, scid;
4643 	struct l2cap_chan *chan;
4644 
4645 	if (cmd_len != sizeof(*req))
4646 		return -EPROTO;
4647 
4648 	scid = __le16_to_cpu(req->scid);
4649 	dcid = __le16_to_cpu(req->dcid);
4650 
4651 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4652 
4653 	chan = l2cap_get_chan_by_scid(conn, dcid);
4654 	if (!chan) {
4655 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4656 		return 0;
4657 	}
4658 
4659 	rsp.dcid = cpu_to_le16(chan->scid);
4660 	rsp.scid = cpu_to_le16(chan->dcid);
4661 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4662 
4663 	chan->ops->set_shutdown(chan);
4664 
4665 	l2cap_chan_unlock(chan);
4666 	mutex_lock(&conn->chan_lock);
4667 	l2cap_chan_lock(chan);
4668 	l2cap_chan_del(chan, ECONNRESET);
4669 	mutex_unlock(&conn->chan_lock);
4670 
4671 	chan->ops->close(chan);
4672 
4673 	l2cap_chan_unlock(chan);
4674 	l2cap_chan_put(chan);
4675 
4676 	return 0;
4677 }
4678 
4679 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4680 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4681 				       u8 *data)
4682 {
4683 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4684 	u16 dcid, scid;
4685 	struct l2cap_chan *chan;
4686 
4687 	if (cmd_len != sizeof(*rsp))
4688 		return -EPROTO;
4689 
4690 	scid = __le16_to_cpu(rsp->scid);
4691 	dcid = __le16_to_cpu(rsp->dcid);
4692 
4693 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4694 
4695 	chan = l2cap_get_chan_by_scid(conn, scid);
4696 	if (!chan)
4697 		return 0;
4699 
4700 	if (chan->state != BT_DISCONN) {
4701 		l2cap_chan_unlock(chan);
4702 		l2cap_chan_put(chan);
4703 		return 0;
4704 	}
4705 
4706 	l2cap_chan_unlock(chan);
4707 	mutex_lock(&conn->chan_lock);
4708 	l2cap_chan_lock(chan);
4709 	l2cap_chan_del(chan, 0);
4710 	mutex_unlock(&conn->chan_lock);
4711 
4712 	chan->ops->close(chan);
4713 
4714 	l2cap_chan_unlock(chan);
4715 	l2cap_chan_put(chan);
4716 
4717 	return 0;
4718 }
4719 
4720 static inline int l2cap_information_req(struct l2cap_conn *conn,
4721 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4722 					u8 *data)
4723 {
4724 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4725 	u16 type;
4726 
4727 	if (cmd_len != sizeof(*req))
4728 		return -EPROTO;
4729 
4730 	type = __le16_to_cpu(req->type);
4731 
4732 	BT_DBG("type 0x%4.4x", type);
4733 
4734 	if (type == L2CAP_IT_FEAT_MASK) {
4735 		u8 buf[8];
4736 		u32 feat_mask = l2cap_feat_mask;
4737 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4738 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4739 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4740 		if (!disable_ertm)
4741 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4742 				| L2CAP_FEAT_FCS;
4743 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4744 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4745 				| L2CAP_FEAT_EXT_WINDOW;
4746 
4747 		put_unaligned_le32(feat_mask, rsp->data);
4748 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4749 			       buf);
4750 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4751 		u8 buf[12];
4752 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4753 
4754 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4755 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4756 		rsp->data[0] = conn->local_fixed_chan;
4757 		memset(rsp->data + 1, 0, 7);
4758 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4759 			       buf);
4760 	} else {
4761 		struct l2cap_info_rsp rsp;
4762 		rsp.type   = cpu_to_le16(type);
4763 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4764 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4765 			       &rsp);
4766 	}
4767 
4768 	return 0;
4769 }
4770 
4771 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4772 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4773 					u8 *data)
4774 {
4775 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4776 	u16 type, result;
4777 
4778 	if (cmd_len < sizeof(*rsp))
4779 		return -EPROTO;
4780 
4781 	type   = __le16_to_cpu(rsp->type);
4782 	result = __le16_to_cpu(rsp->result);
4783 
4784 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4785 
4786 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4787 	if (cmd->ident != conn->info_ident ||
4788 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4789 		return 0;
4790 
4791 	cancel_delayed_work(&conn->info_timer);
4792 
4793 	if (result != L2CAP_IR_SUCCESS) {
4794 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4795 		conn->info_ident = 0;
4796 
4797 		l2cap_conn_start(conn);
4798 
4799 		return 0;
4800 	}
4801 
4802 	switch (type) {
4803 	case L2CAP_IT_FEAT_MASK:
4804 		conn->feat_mask = get_unaligned_le32(rsp->data);
4805 
4806 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4807 			struct l2cap_info_req req;
4808 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4809 
4810 			conn->info_ident = l2cap_get_ident(conn);
4811 
4812 			l2cap_send_cmd(conn, conn->info_ident,
4813 				       L2CAP_INFO_REQ, sizeof(req), &req);
4814 		} else {
4815 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4816 			conn->info_ident = 0;
4817 
4818 			l2cap_conn_start(conn);
4819 		}
4820 		break;
4821 
4822 	case L2CAP_IT_FIXED_CHAN:
4823 		conn->remote_fixed_chan = rsp->data[0];
4824 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4825 		conn->info_ident = 0;
4826 
4827 		l2cap_conn_start(conn);
4828 		break;
4829 	}
4830 
4831 	return 0;
4832 }
4833 
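/* Handle an incoming Create Channel Request: for controller id 0 fall back
 * to a plain BR/EDR connection, otherwise validate the AMP controller and
 * bind the new channel to the high-speed link.
 */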
4834 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4835 				    struct l2cap_cmd_hdr *cmd,
4836 				    u16 cmd_len, void *data)
4837 {
4838 	struct l2cap_create_chan_req *req = data;
4839 	struct l2cap_create_chan_rsp rsp;
4840 	struct l2cap_chan *chan;
4841 	struct hci_dev *hdev;
4842 	u16 psm, scid;
4843 
4844 	if (cmd_len != sizeof(*req))
4845 		return -EPROTO;
4846 
4847 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4848 		return -EINVAL;
4849 
4850 	psm = le16_to_cpu(req->psm);
4851 	scid = le16_to_cpu(req->scid);
4852 
4853 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4854 
4855 	/* For controller id 0 make BR/EDR connection */
4856 	if (req->amp_id == AMP_ID_BREDR) {
4857 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4858 			      req->amp_id);
4859 		return 0;
4860 	}
4861 
4862 	/* Validate AMP controller id */
4863 	hdev = hci_dev_get(req->amp_id);
4864 	if (!hdev)
4865 		goto error;
4866 
4867 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4868 		hci_dev_put(hdev);
4869 		goto error;
4870 	}
4871 
4872 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4873 			     req->amp_id);
4874 	if (chan) {
4875 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4876 		struct hci_conn *hs_hcon;
4877 
4878 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4879 						  &conn->hcon->dst);
4880 		if (!hs_hcon) {
4881 			hci_dev_put(hdev);
4882 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4883 					       chan->dcid);
4884 			return 0;
4885 		}
4886 
4887 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4888 
4889 		mgr->bredr_chan = chan;
4890 		chan->hs_hcon = hs_hcon;
4891 		chan->fcs = L2CAP_FCS_NONE;
4892 		conn->mtu = hdev->block_mtu;
4893 	}
4894 
4895 	hci_dev_put(hdev);
4896 
4897 	return 0;
4898 
4899 error:
4900 	rsp.dcid = 0;
4901 	rsp.scid = cpu_to_le16(scid);
4902 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4903 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4904 
4905 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4906 		       sizeof(rsp), &rsp);
4907 
4908 	return 0;
4909 }
4910 
4911 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4912 {
4913 	struct l2cap_move_chan_req req;
4914 	u8 ident;
4915 
4916 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4917 
4918 	ident = l2cap_get_ident(chan->conn);
4919 	chan->ident = ident;
4920 
4921 	req.icid = cpu_to_le16(chan->scid);
4922 	req.dest_amp_id = dest_amp_id;
4923 
4924 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4925 		       &req);
4926 
4927 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4928 }
4929 
4930 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4931 {
4932 	struct l2cap_move_chan_rsp rsp;
4933 
4934 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4935 
4936 	rsp.icid = cpu_to_le16(chan->dcid);
4937 	rsp.result = cpu_to_le16(result);
4938 
4939 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4940 		       sizeof(rsp), &rsp);
4941 }
4942 
4943 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4944 {
4945 	struct l2cap_move_chan_cfm cfm;
4946 
4947 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4948 
4949 	chan->ident = l2cap_get_ident(chan->conn);
4950 
4951 	cfm.icid = cpu_to_le16(chan->scid);
4952 	cfm.result = cpu_to_le16(result);
4953 
4954 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4955 		       sizeof(cfm), &cfm);
4956 
4957 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4958 }
4959 
4960 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4961 {
4962 	struct l2cap_move_chan_cfm cfm;
4963 
4964 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4965 
4966 	cfm.icid = cpu_to_le16(icid);
4967 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4968 
4969 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4970 		       sizeof(cfm), &cfm);
4971 }
4972 
4973 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4974 					 u16 icid)
4975 {
4976 	struct l2cap_move_chan_cfm_rsp rsp;
4977 
4978 	BT_DBG("icid 0x%4.4x", icid);
4979 
4980 	rsp.icid = cpu_to_le16(icid);
4981 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4982 }
4983 
4984 static void __release_logical_link(struct l2cap_chan *chan)
4985 {
4986 	chan->hs_hchan = NULL;
4987 	chan->hs_hcon = NULL;
4988 
4989 	/* Placeholder - release the logical link */
4990 }
4991 
4992 static void l2cap_logical_fail(struct l2cap_chan *chan)
4993 {
4994 	/* Logical link setup failed */
4995 	if (chan->state != BT_CONNECTED) {
4996 		/* Create channel failure, disconnect */
4997 		l2cap_send_disconn_req(chan, ECONNRESET);
4998 		return;
4999 	}
5000 
5001 	switch (chan->move_role) {
5002 	case L2CAP_MOVE_ROLE_RESPONDER:
5003 		l2cap_move_done(chan);
5004 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5005 		break;
5006 	case L2CAP_MOVE_ROLE_INITIATOR:
5007 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5008 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5009 			/* Remote has only sent pending or
5010 			 * success responses, clean up
5011 			 */
5012 			l2cap_move_done(chan);
5013 		}
5014 
5015 		/* Other amp move states imply that the move
5016 		 * has already aborted
5017 		 */
5018 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5019 		break;
5020 	}
5021 }
5022 
5023 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5024 					struct hci_chan *hchan)
5025 {
5026 	struct l2cap_conf_rsp rsp;
5027 
5028 	chan->hs_hchan = hchan;
5029 	chan->hs_hcon->l2cap_data = chan->conn;
5030 
5031 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5032 
5033 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5034 		int err;
5035 
5036 		set_default_fcs(chan);
5037 
5038 		err = l2cap_ertm_init(chan);
5039 		if (err < 0)
5040 			l2cap_send_disconn_req(chan, -err);
5041 		else
5042 			l2cap_chan_ready(chan);
5043 	}
5044 }
5045 
5046 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5047 				      struct hci_chan *hchan)
5048 {
5049 	chan->hs_hcon = hchan->conn;
5050 	chan->hs_hcon->l2cap_data = chan->conn;
5051 
5052 	BT_DBG("move_state %d", chan->move_state);
5053 
5054 	switch (chan->move_state) {
5055 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5056 		/* Move confirm will be sent after a success
5057 		 * response is received
5058 		 */
5059 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5060 		break;
5061 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5062 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5063 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5064 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5065 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5066 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5067 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5068 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5069 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5070 		}
5071 		break;
5072 	default:
5073 		/* Move was not in expected state, free the channel */
5074 		__release_logical_link(chan);
5075 
5076 		chan->move_state = L2CAP_MOVE_STABLE;
5077 	}
5078 }
5079 
5080 /* Call with chan locked */
5081 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5082 		       u8 status)
5083 {
5084 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5085 
5086 	if (status) {
5087 		l2cap_logical_fail(chan);
5088 		__release_logical_link(chan);
5089 		return;
5090 	}
5091 
5092 	if (chan->state != BT_CONNECTED) {
5093 		/* Ignore logical link if channel is on BR/EDR */
5094 		if (chan->local_amp_id != AMP_ID_BREDR)
5095 			l2cap_logical_finish_create(chan, hchan);
5096 	} else {
5097 		l2cap_logical_finish_move(chan, hchan);
5098 	}
5099 }
5100 
5101 void l2cap_move_start(struct l2cap_chan *chan)
5102 {
5103 	BT_DBG("chan %p", chan);
5104 
5105 	if (chan->local_amp_id == AMP_ID_BREDR) {
5106 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5107 			return;
5108 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5109 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5110 		/* Placeholder - start physical link setup */
5111 	} else {
5112 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5113 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5114 		chan->move_id = 0;
5115 		l2cap_move_setup(chan);
5116 		l2cap_send_move_chan_req(chan, 0);
5117 	}
5118 }
5119 
5120 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5121 			    u8 local_amp_id, u8 remote_amp_id)
5122 {
5123 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5124 	       local_amp_id, remote_amp_id);
5125 
5126 	chan->fcs = L2CAP_FCS_NONE;
5127 
5128 	/* Outgoing channel on AMP */
5129 	if (chan->state == BT_CONNECT) {
5130 		if (result == L2CAP_CR_SUCCESS) {
5131 			chan->local_amp_id = local_amp_id;
5132 			l2cap_send_create_chan_req(chan, remote_amp_id);
5133 		} else {
5134 			/* Revert to BR/EDR connect */
5135 			l2cap_send_conn_req(chan);
5136 		}
5137 
5138 		return;
5139 	}
5140 
5141 	/* Incoming channel on AMP */
5142 	if (__l2cap_no_conn_pending(chan)) {
5143 		struct l2cap_conn_rsp rsp;
5144 		char buf[128];
5145 		rsp.scid = cpu_to_le16(chan->dcid);
5146 		rsp.dcid = cpu_to_le16(chan->scid);
5147 
5148 		if (result == L2CAP_CR_SUCCESS) {
5149 			/* Send successful response */
5150 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5151 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5152 		} else {
5153 			/* Send negative response */
5154 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5155 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5156 		}
5157 
5158 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5159 			       sizeof(rsp), &rsp);
5160 
5161 		if (result == L2CAP_CR_SUCCESS) {
5162 			l2cap_state_change(chan, BT_CONFIG);
5163 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5164 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5165 				       L2CAP_CONF_REQ,
5166 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5167 			chan->num_conf_req++;
5168 		}
5169 	}
5170 }
5171 
5172 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5173 				   u8 remote_amp_id)
5174 {
5175 	l2cap_move_setup(chan);
5176 	chan->move_id = local_amp_id;
5177 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5178 
5179 	l2cap_send_move_chan_req(chan, remote_amp_id);
5180 }
5181 
5182 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5183 {
5184 	struct hci_chan *hchan = NULL;
5185 
5186 	/* Placeholder - get hci_chan for logical link */
5187 
5188 	if (hchan) {
5189 		if (hchan->state == BT_CONNECTED) {
5190 			/* Logical link is ready to go */
5191 			chan->hs_hcon = hchan->conn;
5192 			chan->hs_hcon->l2cap_data = chan->conn;
5193 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5194 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5195 
5196 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5197 		} else {
5198 			/* Wait for logical link to be ready */
5199 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5200 		}
5201 	} else {
5202 		/* Logical link not available */
5203 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5204 	}
5205 }
5206 
5207 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5208 {
5209 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5210 		u8 rsp_result;
5211 		if (result == -EINVAL)
5212 			rsp_result = L2CAP_MR_BAD_ID;
5213 		else
5214 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5215 
5216 		l2cap_send_move_chan_rsp(chan, rsp_result);
5217 	}
5218 
5219 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5220 	chan->move_state = L2CAP_MOVE_STABLE;
5221 
5222 	/* Restart data transmission */
5223 	l2cap_ertm_send(chan);
5224 }
5225 
5226 /* Invoke with locked chan */
5227 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5228 {
5229 	u8 local_amp_id = chan->local_amp_id;
5230 	u8 remote_amp_id = chan->remote_amp_id;
5231 
5232 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5233 	       chan, result, local_amp_id, remote_amp_id);
5234 
5235 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5236 		return;
5237 
5238 	if (chan->state != BT_CONNECTED) {
5239 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5240 	} else if (result != L2CAP_MR_SUCCESS) {
5241 		l2cap_do_move_cancel(chan, result);
5242 	} else {
5243 		switch (chan->move_role) {
5244 		case L2CAP_MOVE_ROLE_INITIATOR:
5245 			l2cap_do_move_initiate(chan, local_amp_id,
5246 					       remote_amp_id);
5247 			break;
5248 		case L2CAP_MOVE_ROLE_RESPONDER:
5249 			l2cap_do_move_respond(chan, result);
5250 			break;
5251 		default:
5252 			l2cap_do_move_cancel(chan, result);
5253 			break;
5254 		}
5255 	}
5256 }
5257 
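/* Handle an incoming Move Channel Request: validate the channel and the
 * destination controller, detect move collisions and send the Move Channel
 * Response.
 */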
5258 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5259 					 struct l2cap_cmd_hdr *cmd,
5260 					 u16 cmd_len, void *data)
5261 {
5262 	struct l2cap_move_chan_req *req = data;
5263 	struct l2cap_move_chan_rsp rsp;
5264 	struct l2cap_chan *chan;
5265 	u16 icid = 0;
5266 	u16 result = L2CAP_MR_NOT_ALLOWED;
5267 
5268 	if (cmd_len != sizeof(*req))
5269 		return -EPROTO;
5270 
5271 	icid = le16_to_cpu(req->icid);
5272 
5273 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5274 
5275 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5276 		return -EINVAL;
5277 
5278 	chan = l2cap_get_chan_by_dcid(conn, icid);
5279 	if (!chan) {
5280 		rsp.icid = cpu_to_le16(icid);
5281 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5282 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5283 			       sizeof(rsp), &rsp);
5284 		return 0;
5285 	}
5286 
5287 	chan->ident = cmd->ident;
5288 
5289 	if (chan->scid < L2CAP_CID_DYN_START ||
5290 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5291 	    (chan->mode != L2CAP_MODE_ERTM &&
5292 	     chan->mode != L2CAP_MODE_STREAMING)) {
5293 		result = L2CAP_MR_NOT_ALLOWED;
5294 		goto send_move_response;
5295 	}
5296 
5297 	if (chan->local_amp_id == req->dest_amp_id) {
5298 		result = L2CAP_MR_SAME_ID;
5299 		goto send_move_response;
5300 	}
5301 
5302 	if (req->dest_amp_id != AMP_ID_BREDR) {
5303 		struct hci_dev *hdev;
5304 		hdev = hci_dev_get(req->dest_amp_id);
5305 		if (!hdev || hdev->dev_type != HCI_AMP ||
5306 		    !test_bit(HCI_UP, &hdev->flags)) {
5307 			if (hdev)
5308 				hci_dev_put(hdev);
5309 
5310 			result = L2CAP_MR_BAD_ID;
5311 			goto send_move_response;
5312 		}
5313 		hci_dev_put(hdev);
5314 	}
5315 
5316 	/* Detect a move collision.  Only send a collision response
5317 	 * if this side has "lost", otherwise proceed with the move.
5318 	 * The winner has the larger bd_addr.
5319 	 */
5320 	if ((__chan_is_moving(chan) ||
5321 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5322 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5323 		result = L2CAP_MR_COLLISION;
5324 		goto send_move_response;
5325 	}
5326 
5327 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5328 	l2cap_move_setup(chan);
5329 	chan->move_id = req->dest_amp_id;
5330 
5331 	if (req->dest_amp_id == AMP_ID_BREDR) {
5332 		/* Moving to BR/EDR */
5333 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5334 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5335 			result = L2CAP_MR_PEND;
5336 		} else {
5337 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5338 			result = L2CAP_MR_SUCCESS;
5339 		}
5340 	} else {
5341 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5342 		/* Placeholder - uncomment when amp functions are available */
5343 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5344 		result = L2CAP_MR_PEND;
5345 	}
5346 
5347 send_move_response:
5348 	l2cap_send_move_chan_rsp(chan, result);
5349 
5350 	l2cap_chan_unlock(chan);
5351 	l2cap_chan_put(chan);
5352 
5353 	return 0;
5354 }
5355 
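/* Advance an initiated channel move after a successful or pending Move
 * Channel Response, sending the Move Channel Confirm once the logical link
 * is ready.
 */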
5356 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5357 {
5358 	struct l2cap_chan *chan;
5359 	struct hci_chan *hchan = NULL;
5360 
5361 	chan = l2cap_get_chan_by_scid(conn, icid);
5362 	if (!chan) {
5363 		l2cap_send_move_chan_cfm_icid(conn, icid);
5364 		return;
5365 	}
5366 
5367 	__clear_chan_timer(chan);
5368 	if (result == L2CAP_MR_PEND)
5369 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5370 
5371 	switch (chan->move_state) {
5372 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5373 		/* Move confirm will be sent when logical link
5374 		 * is complete.
5375 		 */
5376 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5377 		break;
5378 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5379 		if (result == L2CAP_MR_PEND) {
5380 			break;
5381 		} else if (test_bit(CONN_LOCAL_BUSY,
5382 				    &chan->conn_state)) {
5383 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5384 		} else {
5385 			/* Logical link is up or moving to BR/EDR,
5386 			 * proceed with move
5387 			 */
5388 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5389 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5390 		}
5391 		break;
5392 	case L2CAP_MOVE_WAIT_RSP:
5393 		/* Moving to AMP */
5394 		if (result == L2CAP_MR_SUCCESS) {
5395 			/* Remote is ready, send confirm immediately
5396 			 * after logical link is ready
5397 			 */
5398 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5399 		} else {
5400 			/* Both logical link and move success
5401 			 * are required to confirm
5402 			 */
5403 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5404 		}
5405 
5406 		/* Placeholder - get hci_chan for logical link */
5407 		if (!hchan) {
5408 			/* Logical link not available */
5409 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5410 			break;
5411 		}
5412 
5413 		/* If the logical link is not yet connected, do not
5414 		 * send confirmation.
5415 		 */
5416 		if (hchan->state != BT_CONNECTED)
5417 			break;
5418 
5419 		/* Logical link is already ready to go */
5420 
5421 		chan->hs_hcon = hchan->conn;
5422 		chan->hs_hcon->l2cap_data = chan->conn;
5423 
5424 		if (result == L2CAP_MR_SUCCESS) {
5425 			/* Can confirm now */
5426 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5427 		} else {
5428 			/* Now only need move success
5429 			 * to confirm
5430 			 */
5431 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5432 		}
5433 
5434 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5435 		break;
5436 	default:
5437 		/* Any other amp move state means the move failed. */
5438 		chan->move_id = chan->local_amp_id;
5439 		l2cap_move_done(chan);
5440 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5441 	}
5442 
5443 	l2cap_chan_unlock(chan);
5444 	l2cap_chan_put(chan);
5445 }
5446 
5447 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5448 			    u16 result)
5449 {
5450 	struct l2cap_chan *chan;
5451 
5452 	chan = l2cap_get_chan_by_ident(conn, ident);
5453 	if (!chan) {
5454 		/* Could not locate channel, icid is best guess */
5455 		l2cap_send_move_chan_cfm_icid(conn, icid);
5456 		return;
5457 	}
5458 
5459 	__clear_chan_timer(chan);
5460 
5461 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5462 		if (result == L2CAP_MR_COLLISION) {
5463 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5464 		} else {
5465 			/* Cleanup - cancel move */
5466 			chan->move_id = chan->local_amp_id;
5467 			l2cap_move_done(chan);
5468 		}
5469 	}
5470 
5471 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5472 
5473 	l2cap_chan_unlock(chan);
5474 	l2cap_chan_put(chan);
5475 }
5476 
5477 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5478 				  struct l2cap_cmd_hdr *cmd,
5479 				  u16 cmd_len, void *data)
5480 {
5481 	struct l2cap_move_chan_rsp *rsp = data;
5482 	u16 icid, result;
5483 
5484 	if (cmd_len != sizeof(*rsp))
5485 		return -EPROTO;
5486 
5487 	icid = le16_to_cpu(rsp->icid);
5488 	result = le16_to_cpu(rsp->result);
5489 
5490 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5491 
5492 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5493 		l2cap_move_continue(conn, icid, result);
5494 	else
5495 		l2cap_move_fail(conn, cmd->ident, icid, result);
5496 
5497 	return 0;
5498 }
5499 
5500 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5501 				      struct l2cap_cmd_hdr *cmd,
5502 				      u16 cmd_len, void *data)
5503 {
5504 	struct l2cap_move_chan_cfm *cfm = data;
5505 	struct l2cap_chan *chan;
5506 	u16 icid, result;
5507 
5508 	if (cmd_len != sizeof(*cfm))
5509 		return -EPROTO;
5510 
5511 	icid = le16_to_cpu(cfm->icid);
5512 	result = le16_to_cpu(cfm->result);
5513 
5514 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5515 
5516 	chan = l2cap_get_chan_by_dcid(conn, icid);
5517 	if (!chan) {
5518 		/* Spec requires a response even if the icid was not found */
5519 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5520 		return 0;
5521 	}
5522 
5523 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5524 		if (result == L2CAP_MC_CONFIRMED) {
5525 			chan->local_amp_id = chan->move_id;
5526 			if (chan->local_amp_id == AMP_ID_BREDR)
5527 				__release_logical_link(chan);
5528 		} else {
5529 			chan->move_id = chan->local_amp_id;
5530 		}
5531 
5532 		l2cap_move_done(chan);
5533 	}
5534 
5535 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5536 
5537 	l2cap_chan_unlock(chan);
5538 	l2cap_chan_put(chan);
5539 
5540 	return 0;
5541 }
5542 
5543 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5544 						 struct l2cap_cmd_hdr *cmd,
5545 						 u16 cmd_len, void *data)
5546 {
5547 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5548 	struct l2cap_chan *chan;
5549 	u16 icid;
5550 
5551 	if (cmd_len != sizeof(*rsp))
5552 		return -EPROTO;
5553 
5554 	icid = le16_to_cpu(rsp->icid);
5555 
5556 	BT_DBG("icid 0x%4.4x", icid);
5557 
5558 	chan = l2cap_get_chan_by_scid(conn, icid);
5559 	if (!chan)
5560 		return 0;
5561 
5562 	__clear_chan_timer(chan);
5563 
5564 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5565 		chan->local_amp_id = chan->move_id;
5566 
5567 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5568 			__release_logical_link(chan);
5569 
5570 		l2cap_move_done(chan);
5571 	}
5572 
5573 	l2cap_chan_unlock(chan);
5574 	l2cap_chan_put(chan);
5575 
5576 	return 0;
5577 }
5578 
5579 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5580 					      struct l2cap_cmd_hdr *cmd,
5581 					      u16 cmd_len, u8 *data)
5582 {
5583 	struct hci_conn *hcon = conn->hcon;
5584 	struct l2cap_conn_param_update_req *req;
5585 	struct l2cap_conn_param_update_rsp rsp;
5586 	u16 min, max, latency, to_multiplier;
5587 	int err;
5588 
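	/* Per the spec, the Connection Parameter Update Request is only sent
	 * from the peripheral to the central, so it is only valid if we are
	 * the master (central) on this link.
	 */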
5589 	if (hcon->role != HCI_ROLE_MASTER)
5590 		return -EINVAL;
5591 
5592 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5593 		return -EPROTO;
5594 
5595 	req = (struct l2cap_conn_param_update_req *) data;
5596 	min		= __le16_to_cpu(req->min);
5597 	max		= __le16_to_cpu(req->max);
5598 	latency		= __le16_to_cpu(req->latency);
5599 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5600 
5601 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5602 	       min, max, latency, to_multiplier);
5603 
5604 	memset(&rsp, 0, sizeof(rsp));
5605 
5606 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5607 	if (err)
5608 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5609 	else
5610 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5611 
5612 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5613 		       sizeof(rsp), &rsp);
5614 
5615 	if (!err) {
5616 		u8 store_hint;
5617 
5618 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5619 						to_multiplier);
5620 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5621 				    store_hint, min, max, latency,
5622 				    to_multiplier);
5623 
5624 	}
5625 
5626 	return 0;
5627 }
5628 
5629 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5630 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5631 				u8 *data)
5632 {
5633 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5634 	struct hci_conn *hcon = conn->hcon;
5635 	u16 dcid, mtu, mps, credits, result;
5636 	struct l2cap_chan *chan;
5637 	int err, sec_level;
5638 
5639 	if (cmd_len < sizeof(*rsp))
5640 		return -EPROTO;
5641 
5642 	dcid    = __le16_to_cpu(rsp->dcid);
5643 	mtu     = __le16_to_cpu(rsp->mtu);
5644 	mps     = __le16_to_cpu(rsp->mps);
5645 	credits = __le16_to_cpu(rsp->credits);
5646 	result  = __le16_to_cpu(rsp->result);
5647 
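	/* On success the response must carry at least the 23-octet minimum
	 * MTU/MPS defined for LE credit based channels and a DCID from the
	 * LE dynamic range.
	 */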
5648 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5649 					   dcid < L2CAP_CID_DYN_START ||
5650 					   dcid > L2CAP_CID_LE_DYN_END))
5651 		return -EPROTO;
5652 
5653 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5654 	       dcid, mtu, mps, credits, result);
5655 
5656 	mutex_lock(&conn->chan_lock);
5657 
5658 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5659 	if (!chan) {
5660 		err = -EBADSLT;
5661 		goto unlock;
5662 	}
5663 
5664 	err = 0;
5665 
5666 	l2cap_chan_lock(chan);
5667 
5668 	switch (result) {
5669 	case L2CAP_CR_LE_SUCCESS:
5670 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5671 			err = -EBADSLT;
5672 			break;
5673 		}
5674 
5675 		chan->ident = 0;
5676 		chan->dcid = dcid;
5677 		chan->omtu = mtu;
5678 		chan->remote_mps = mps;
5679 		chan->tx_credits = credits;
5680 		l2cap_chan_ready(chan);
5681 		break;
5682 
5683 	case L2CAP_CR_LE_AUTHENTICATION:
5684 	case L2CAP_CR_LE_ENCRYPTION:
5685 		/* If we already have MITM protection we can't do
5686 		 * anything.
5687 		 */
5688 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5689 			l2cap_chan_del(chan, ECONNREFUSED);
5690 			break;
5691 		}
5692 
5693 		sec_level = hcon->sec_level + 1;
5694 		if (chan->sec_level < sec_level)
5695 			chan->sec_level = sec_level;
5696 
5697 		/* We'll need to send a new Connect Request */
5698 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5699 
5700 		smp_conn_security(hcon, chan->sec_level);
5701 		break;
5702 
5703 	default:
5704 		l2cap_chan_del(chan, ECONNREFUSED);
5705 		break;
5706 	}
5707 
5708 	l2cap_chan_unlock(chan);
5709 
5710 unlock:
5711 	mutex_unlock(&conn->chan_lock);
5712 
5713 	return err;
5714 }
5715 
5716 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5717 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5718 				      u8 *data)
5719 {
5720 	int err = 0;
5721 
5722 	switch (cmd->code) {
5723 	case L2CAP_COMMAND_REJ:
5724 		l2cap_command_rej(conn, cmd, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_CONN_REQ:
5728 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	case L2CAP_CONN_RSP:
5732 	case L2CAP_CREATE_CHAN_RSP:
5733 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5734 		break;
5735 
5736 	case L2CAP_CONF_REQ:
5737 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5738 		break;
5739 
5740 	case L2CAP_CONF_RSP:
5741 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5742 		break;
5743 
5744 	case L2CAP_DISCONN_REQ:
5745 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5746 		break;
5747 
5748 	case L2CAP_DISCONN_RSP:
5749 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5750 		break;
5751 
5752 	case L2CAP_ECHO_REQ:
5753 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_ECHO_RSP:
5757 		break;
5758 
5759 	case L2CAP_INFO_REQ:
5760 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5761 		break;
5762 
5763 	case L2CAP_INFO_RSP:
5764 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5765 		break;
5766 
5767 	case L2CAP_CREATE_CHAN_REQ:
5768 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5769 		break;
5770 
5771 	case L2CAP_MOVE_CHAN_REQ:
5772 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5773 		break;
5774 
5775 	case L2CAP_MOVE_CHAN_RSP:
5776 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5777 		break;
5778 
5779 	case L2CAP_MOVE_CHAN_CFM:
5780 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5781 		break;
5782 
5783 	case L2CAP_MOVE_CHAN_CFM_RSP:
5784 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5785 		break;
5786 
5787 	default:
5788 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5789 		err = -EINVAL;
5790 		break;
5791 	}
5792 
5793 	return err;
5794 }
5795 
5796 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5797 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5798 				u8 *data)
5799 {
5800 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5801 	struct l2cap_le_conn_rsp rsp;
5802 	struct l2cap_chan *chan, *pchan;
5803 	u16 dcid, scid, credits, mtu, mps;
5804 	__le16 psm;
5805 	u8 result;
5806 
5807 	if (cmd_len != sizeof(*req))
5808 		return -EPROTO;
5809 
5810 	scid = __le16_to_cpu(req->scid);
5811 	mtu  = __le16_to_cpu(req->mtu);
5812 	mps  = __le16_to_cpu(req->mps);
5813 	psm  = req->psm;
5814 	dcid = 0;
5815 	credits = 0;
5816 
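	/* 23 octets is the minimum MTU and MPS allowed for LE credit based
	 * channels; anything smaller is a malformed request.
	 */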
5817 	if (mtu < 23 || mps < 23)
5818 		return -EPROTO;
5819 
5820 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5821 	       scid, mtu, mps);
5822 
5823 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5824 	 * page 1059:
5825 	 *
5826 	 * Valid range: 0x0001-0x00ff
5827 	 *
5828 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5829 	 */
5830 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5831 		result = L2CAP_CR_LE_BAD_PSM;
5832 		chan = NULL;
5833 		goto response;
5834 	}
5835 
5836 	/* Check if we have socket listening on psm */
5837 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5838 					 &conn->hcon->dst, LE_LINK);
5839 	if (!pchan) {
5840 		result = L2CAP_CR_LE_BAD_PSM;
5841 		chan = NULL;
5842 		goto response;
5843 	}
5844 
5845 	mutex_lock(&conn->chan_lock);
5846 	l2cap_chan_lock(pchan);
5847 
5848 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5849 				     SMP_ALLOW_STK)) {
5850 		result = L2CAP_CR_LE_AUTHENTICATION;
5851 		chan = NULL;
5852 		goto response_unlock;
5853 	}
5854 
5855 	/* Check for valid dynamic CID range */
5856 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5857 		result = L2CAP_CR_LE_INVALID_SCID;
5858 		chan = NULL;
5859 		goto response_unlock;
5860 	}
5861 
5862 	/* Check if we already have channel with that dcid */
5863 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5864 		result = L2CAP_CR_LE_SCID_IN_USE;
5865 		chan = NULL;
5866 		goto response_unlock;
5867 	}
5868 
5869 	chan = pchan->ops->new_connection(pchan);
5870 	if (!chan) {
5871 		result = L2CAP_CR_LE_NO_MEM;
5872 		goto response_unlock;
5873 	}
5874 
5875 	bacpy(&chan->src, &conn->hcon->src);
5876 	bacpy(&chan->dst, &conn->hcon->dst);
5877 	chan->src_type = bdaddr_src_type(conn->hcon);
5878 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5879 	chan->psm  = psm;
5880 	chan->dcid = scid;
5881 	chan->omtu = mtu;
5882 	chan->remote_mps = mps;
5883 
5884 	__l2cap_chan_add(conn, chan);
5885 
5886 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5887 
5888 	dcid = chan->scid;
5889 	credits = chan->rx_credits;
5890 
5891 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5892 
5893 	chan->ident = cmd->ident;
5894 
5895 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5896 		l2cap_state_change(chan, BT_CONNECT2);
5897 		/* The following result value is actually not defined
5898 		 * for LE CoC but we use it to let the function know
5899 		 * that it should bail out after doing its cleanup
5900 		 * instead of sending a response.
5901 		 */
5902 		result = L2CAP_CR_PEND;
5903 		chan->ops->defer(chan);
5904 	} else {
5905 		l2cap_chan_ready(chan);
5906 		result = L2CAP_CR_LE_SUCCESS;
5907 	}
5908 
5909 response_unlock:
5910 	l2cap_chan_unlock(pchan);
5911 	mutex_unlock(&conn->chan_lock);
5912 	l2cap_chan_put(pchan);
5913 
5914 	if (result == L2CAP_CR_PEND)
5915 		return 0;
5916 
5917 response:
5918 	if (chan) {
5919 		rsp.mtu = cpu_to_le16(chan->imtu);
5920 		rsp.mps = cpu_to_le16(chan->mps);
5921 	} else {
5922 		rsp.mtu = 0;
5923 		rsp.mps = 0;
5924 	}
5925 
5926 	rsp.dcid    = cpu_to_le16(dcid);
5927 	rsp.credits = cpu_to_le16(credits);
5928 	rsp.result  = cpu_to_le16(result);
5929 
5930 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5931 
5932 	return 0;
5933 }
5934 
5935 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5936 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5937 				   u8 *data)
5938 {
5939 	struct l2cap_le_credits *pkt;
5940 	struct l2cap_chan *chan;
5941 	u16 cid, credits, max_credits;
5942 
5943 	if (cmd_len != sizeof(*pkt))
5944 		return -EPROTO;
5945 
5946 	pkt = (struct l2cap_le_credits *) data;
5947 	cid	= __le16_to_cpu(pkt->cid);
5948 	credits	= __le16_to_cpu(pkt->credits);
5949 
5950 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5951 
5952 	chan = l2cap_get_chan_by_dcid(conn, cid);
5953 	if (!chan)
5954 		return -EBADSLT;
5955 
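	/* A peer may never grant more credits than fit in the 16-bit credit
	 * count (LE_FLOWCTL_MAX_CREDITS); treat an overflow as a fatal
	 * protocol error on this channel.
	 */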
5956 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5957 	if (credits > max_credits) {
5958 		BT_ERR("LE credits overflow");
5959 		l2cap_send_disconn_req(chan, ECONNRESET);
5960 
5961 		/* Return 0 so that we don't trigger an unnecessary
5962 		 * command reject packet.
5963 		 */
5964 		goto unlock;
5965 	}
5966 
5967 	chan->tx_credits += credits;
5968 
5969 	/* Resume sending */
5970 	l2cap_le_flowctl_send(chan);
5971 
5972 	if (chan->tx_credits)
5973 		chan->ops->resume(chan);
5974 
5975 unlock:
5976 	l2cap_chan_unlock(chan);
5977 	l2cap_chan_put(chan);
5978 
5979 	return 0;
5980 }
5981 
5982 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5983 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5984 				       u8 *data)
5985 {
5986 	struct l2cap_ecred_conn_req *req = (void *) data;
5987 	struct {
5988 		struct l2cap_ecred_conn_rsp rsp;
5989 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5990 	} __packed pdu;
5991 	struct l2cap_chan *chan, *pchan;
5992 	u16 mtu, mps;
5993 	__le16 psm;
5994 	u8 result, len = 0;
5995 	int i, num_scid;
5996 	bool defer = false;
5997 
5998 	if (!enable_ecred)
5999 		return -EINVAL;
6000 
6001 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6002 		result = L2CAP_CR_LE_INVALID_PARAMS;
6003 		goto response;
6004 	}
6005 
6006 	cmd_len -= sizeof(*req);
6007 	num_scid = cmd_len / sizeof(u16);
6008 
6009 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6010 		result = L2CAP_CR_LE_INVALID_PARAMS;
6011 		goto response;
6012 	}
6013 
6014 	mtu  = __le16_to_cpu(req->mtu);
6015 	mps  = __le16_to_cpu(req->mps);
6016 
6017 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6018 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6019 		goto response;
6020 	}
6021 
6022 	psm  = req->psm;
6023 
6024 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6025 	 * page 1059:
6026 	 *
6027 	 * Valid range: 0x0001-0x00ff
6028 	 *
6029 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6030 	 */
6031 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6032 		result = L2CAP_CR_LE_BAD_PSM;
6033 		goto response;
6034 	}
6035 
6036 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6037 
6038 	memset(&pdu, 0, sizeof(pdu));
6039 
6040 	/* Check if we have socket listening on psm */
6041 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6042 					 &conn->hcon->dst, LE_LINK);
6043 	if (!pchan) {
6044 		result = L2CAP_CR_LE_BAD_PSM;
6045 		goto response;
6046 	}
6047 
6048 	mutex_lock(&conn->chan_lock);
6049 	l2cap_chan_lock(pchan);
6050 
6051 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6052 				     SMP_ALLOW_STK)) {
6053 		result = L2CAP_CR_LE_AUTHENTICATION;
6054 		goto unlock;
6055 	}
6056 
6057 	result = L2CAP_CR_LE_SUCCESS;
6058 
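	/* Process each requested SCID. A DCID of 0x0000 in the response marks
	 * a channel that could not be created; the shared MTU/MPS/credits
	 * fields are filled in from the first channel that is.
	 */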
6059 	for (i = 0; i < num_scid; i++) {
6060 		u16 scid = __le16_to_cpu(req->scid[i]);
6061 
6062 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6063 
6064 		pdu.dcid[i] = 0x0000;
6065 		len += sizeof(*pdu.dcid);
6066 
6067 		/* Check for valid dynamic CID range */
6068 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6069 			result = L2CAP_CR_LE_INVALID_SCID;
6070 			continue;
6071 		}
6072 
6073 		/* Check if we already have channel with that dcid */
6074 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6075 			result = L2CAP_CR_LE_SCID_IN_USE;
6076 			continue;
6077 		}
6078 
6079 		chan = pchan->ops->new_connection(pchan);
6080 		if (!chan) {
6081 			result = L2CAP_CR_LE_NO_MEM;
6082 			continue;
6083 		}
6084 
6085 		bacpy(&chan->src, &conn->hcon->src);
6086 		bacpy(&chan->dst, &conn->hcon->dst);
6087 		chan->src_type = bdaddr_src_type(conn->hcon);
6088 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6089 		chan->psm  = psm;
6090 		chan->dcid = scid;
6091 		chan->omtu = mtu;
6092 		chan->remote_mps = mps;
6093 
6094 		__l2cap_chan_add(conn, chan);
6095 
6096 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6097 
6098 		/* Init response */
6099 		if (!pdu.rsp.credits) {
6100 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6101 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6102 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6103 		}
6104 
6105 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6106 
6107 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6108 
6109 		chan->ident = cmd->ident;
6110 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6111 
6112 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6113 			l2cap_state_change(chan, BT_CONNECT2);
6114 			defer = true;
6115 			chan->ops->defer(chan);
6116 		} else {
6117 			l2cap_chan_ready(chan);
6118 		}
6119 	}
6120 
6121 unlock:
6122 	l2cap_chan_unlock(pchan);
6123 	mutex_unlock(&conn->chan_lock);
6124 	l2cap_chan_put(pchan);
6125 
6126 response:
6127 	pdu.rsp.result = cpu_to_le16(result);
6128 
6129 	if (defer)
6130 		return 0;
6131 
6132 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6133 		       sizeof(pdu.rsp) + len, &pdu);
6134 
6135 	return 0;
6136 }
6137 
6138 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6139 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6140 				       u8 *data)
6141 {
6142 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6143 	struct hci_conn *hcon = conn->hcon;
6144 	u16 mtu, mps, credits, result;
6145 	struct l2cap_chan *chan, *tmp;
6146 	int err = 0, sec_level;
6147 	int i = 0;
6148 
6149 	if (cmd_len < sizeof(*rsp))
6150 		return -EPROTO;
6151 
6152 	mtu     = __le16_to_cpu(rsp->mtu);
6153 	mps     = __le16_to_cpu(rsp->mps);
6154 	credits = __le16_to_cpu(rsp->credits);
6155 	result  = __le16_to_cpu(rsp->result);
6156 
6157 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6158 	       result);
6159 
6160 	mutex_lock(&conn->chan_lock);
6161 
6162 	cmd_len -= sizeof(*rsp);
6163 
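	/* The response carries no SCIDs, only DCIDs in request order, so
	 * match our channels that are still pending on this ident and
	 * consume one DCID per channel.
	 */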
6164 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6165 		u16 dcid;
6166 
6167 		if (chan->ident != cmd->ident ||
6168 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6169 		    chan->state == BT_CONNECTED)
6170 			continue;
6171 
6172 		l2cap_chan_lock(chan);
6173 
6174 		/* Check that there is a dcid for each pending channel */
6175 		if (cmd_len < sizeof(dcid)) {
6176 			l2cap_chan_del(chan, ECONNREFUSED);
6177 			l2cap_chan_unlock(chan);
6178 			continue;
6179 		}
6180 
6181 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6182 		cmd_len -= sizeof(u16);
6183 
6184 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6185 
6186 		/* Check if dcid is already in use */
6187 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6188 			/* If a device receives an
6189 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6190 			 * already-assigned Destination CID, then both the
6191 			 * original channel and the new channel shall be
6192 			 * immediately discarded and not used.
6193 			 */
6194 			l2cap_chan_del(chan, ECONNREFUSED);
6195 			l2cap_chan_unlock(chan);
6196 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6197 			l2cap_chan_lock(chan);
6198 			l2cap_chan_del(chan, ECONNRESET);
6199 			l2cap_chan_unlock(chan);
6200 			continue;
6201 		}
6202 
6203 		switch (result) {
6204 		case L2CAP_CR_LE_AUTHENTICATION:
6205 		case L2CAP_CR_LE_ENCRYPTION:
6206 			/* If we already have MITM protection we can't do
6207 			 * anything.
6208 			 */
6209 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6210 				l2cap_chan_del(chan, ECONNREFUSED);
6211 				break;
6212 			}
6213 
6214 			sec_level = hcon->sec_level + 1;
6215 			if (chan->sec_level < sec_level)
6216 				chan->sec_level = sec_level;
6217 
6218 			/* We'll need to send a new Connect Request */
6219 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6220 
6221 			smp_conn_security(hcon, chan->sec_level);
6222 			break;
6223 
6224 		case L2CAP_CR_LE_BAD_PSM:
6225 			l2cap_chan_del(chan, ECONNREFUSED);
6226 			break;
6227 
6228 		default:
6229 			/* If dcid was not set it means the channel was refused */
6230 			if (!dcid) {
6231 				l2cap_chan_del(chan, ECONNREFUSED);
6232 				break;
6233 			}
6234 
6235 			chan->ident = 0;
6236 			chan->dcid = dcid;
6237 			chan->omtu = mtu;
6238 			chan->remote_mps = mps;
6239 			chan->tx_credits = credits;
6240 			l2cap_chan_ready(chan);
6241 			break;
6242 		}
6243 
6244 		l2cap_chan_unlock(chan);
6245 	}
6246 
6247 	mutex_unlock(&conn->chan_lock);
6248 
6249 	return err;
6250 }
6251 
6252 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6253 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6254 					 u8 *data)
6255 {
6256 	struct l2cap_ecred_reconf_req *req = (void *) data;
6257 	struct l2cap_ecred_reconf_rsp rsp;
6258 	u16 mtu, mps, result;
6259 	struct l2cap_chan *chan;
6260 	int i, num_scid;
6261 
6262 	if (!enable_ecred)
6263 		return -EINVAL;
6264 
6265 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6266 		result = L2CAP_CR_LE_INVALID_PARAMS;
6267 		goto respond;
6268 	}
6269 
6270 	mtu = __le16_to_cpu(req->mtu);
6271 	mps = __le16_to_cpu(req->mps);
6272 
6273 	BT_DBG("mtu %u mps %u", mtu, mps);
6274 
6275 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6276 		result = L2CAP_RECONF_INVALID_MTU;
6277 		goto respond;
6278 	}
6279 
6280 	if (mps < L2CAP_ECRED_MIN_MPS) {
6281 		result = L2CAP_RECONF_INVALID_MPS;
6282 		goto respond;
6283 	}
6284 
6285 	cmd_len -= sizeof(*req);
6286 	num_scid = cmd_len / sizeof(u16);
6287 	result = L2CAP_RECONF_SUCCESS;
6288 
6289 	for (i = 0; i < num_scid; i++) {
6290 		u16 scid;
6291 
6292 		scid = __le16_to_cpu(req->scid[i]);
6293 		if (!scid)
6294 			return -EPROTO;
6295 
6296 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6297 		if (!chan)
6298 			continue;
6299 
6300 		/* If the MTU value is decreased for any of the included
6301 		 * channels, then the receiver shall disconnect all
6302 		 * included channels.
6303 		 */
6304 		if (chan->omtu > mtu) {
6305 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6306 			       chan->omtu, mtu);
6307 			result = L2CAP_RECONF_INVALID_MTU;
6308 		}
6309 
6310 		chan->omtu = mtu;
6311 		chan->remote_mps = mps;
6312 	}
6313 
6314 respond:
6315 	rsp.result = cpu_to_le16(result);
6316 
6317 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6318 		       &rsp);
6319 
6320 	return 0;
6321 }
6322 
6323 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6324 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6325 					 u8 *data)
6326 {
6327 	struct l2cap_chan *chan, *tmp;
6328 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6329 	u16 result;
6330 
6331 	if (cmd_len < sizeof(*rsp))
6332 		return -EPROTO;
6333 
6334 	result = __le16_to_cpu(rsp->result);
6335 
6336 	BT_DBG("result 0x%4.4x", result);
6337 
6338 	if (!result)
6339 		return 0;
6340 
6341 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6342 		if (chan->ident != cmd->ident)
6343 			continue;
6344 
6345 		l2cap_chan_del(chan, ECONNRESET);
6346 	}
6347 
6348 	return 0;
6349 }
6350 
6351 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6352 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6353 				       u8 *data)
6354 {
6355 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6356 	struct l2cap_chan *chan;
6357 
6358 	if (cmd_len < sizeof(*rej))
6359 		return -EPROTO;
6360 
6361 	mutex_lock(&conn->chan_lock);
6362 
6363 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6364 	if (!chan)
6365 		goto done;
6366 
6367 	chan = l2cap_chan_hold_unless_zero(chan);
6368 	if (!chan)
6369 		goto done;
6370 
6371 	l2cap_chan_lock(chan);
6372 	l2cap_chan_del(chan, ECONNREFUSED);
6373 	l2cap_chan_unlock(chan);
6374 	l2cap_chan_put(chan);
6375 
6376 done:
6377 	mutex_unlock(&conn->chan_lock);
6378 	return 0;
6379 }
6380 
6381 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6382 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6383 				   u8 *data)
6384 {
6385 	int err = 0;
6386 
6387 	switch (cmd->code) {
6388 	case L2CAP_COMMAND_REJ:
6389 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6390 		break;
6391 
6392 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6393 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6394 		break;
6395 
6396 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6397 		break;
6398 
6399 	case L2CAP_LE_CONN_RSP:
6400 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6401 		break;
6402 
6403 	case L2CAP_LE_CONN_REQ:
6404 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6405 		break;
6406 
6407 	case L2CAP_LE_CREDITS:
6408 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6409 		break;
6410 
6411 	case L2CAP_ECRED_CONN_REQ:
6412 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6413 		break;
6414 
6415 	case L2CAP_ECRED_CONN_RSP:
6416 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6417 		break;
6418 
6419 	case L2CAP_ECRED_RECONF_REQ:
6420 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6421 		break;
6422 
6423 	case L2CAP_ECRED_RECONF_RSP:
6424 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6425 		break;
6426 
6427 	case L2CAP_DISCONN_REQ:
6428 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6429 		break;
6430 
6431 	case L2CAP_DISCONN_RSP:
6432 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6433 		break;
6434 
6435 	default:
6436 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6437 		err = -EINVAL;
6438 		break;
6439 	}
6440 
6441 	return err;
6442 }
6443 
6444 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6445 					struct sk_buff *skb)
6446 {
6447 	struct hci_conn *hcon = conn->hcon;
6448 	struct l2cap_cmd_hdr *cmd;
6449 	u16 len;
6450 	int err;
6451 
6452 	if (hcon->type != LE_LINK)
6453 		goto drop;
6454 
6455 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6456 		goto drop;
6457 
6458 	cmd = (void *) skb->data;
6459 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6460 
6461 	len = le16_to_cpu(cmd->len);
6462 
6463 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6464 
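	/* An LE signaling frame carries exactly one command, so the header
	 * length must cover the whole remaining frame.
	 */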
6465 	if (len != skb->len || !cmd->ident) {
6466 		BT_DBG("corrupted command");
6467 		goto drop;
6468 	}
6469 
6470 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6471 	if (err) {
6472 		struct l2cap_cmd_rej_unk rej;
6473 
6474 		BT_ERR("Failed to handle LE signaling command (%d)", err);
6475 
6476 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6477 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6478 			       sizeof(rej), &rej);
6479 	}
6480 
6481 drop:
6482 	kfree_skb(skb);
6483 }
6484 
6485 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6486 {
6487 	struct l2cap_cmd_rej_unk rej;
6488 
6489 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6490 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
6491 }
6492 
6493 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6494 				     struct sk_buff *skb)
6495 {
6496 	struct hci_conn *hcon = conn->hcon;
6497 	struct l2cap_cmd_hdr *cmd;
6498 	int err;
6499 
6500 	l2cap_raw_recv(conn, skb);
6501 
6502 	if (hcon->type != ACL_LINK)
6503 		goto drop;
6504 
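	/* Unlike LE, a BR/EDR signaling frame may pack several commands back
	 * to back; parse and dispatch them one at a time.
	 */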
6505 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6506 		u16 len;
6507 
6508 		cmd = (void *) skb->data;
6509 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6510 
6511 		len = le16_to_cpu(cmd->len);
6512 
6513 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6514 		       cmd->ident);
6515 
6516 		if (len > skb->len || !cmd->ident) {
6517 			BT_DBG("corrupted command");
6518 			l2cap_sig_send_rej(conn, cmd->ident);
6519 			skb_pull(skb, len > skb->len ? skb->len : len);
6520 			continue;
6521 		}
6522 
6523 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6524 		if (err) {
6525 			BT_ERR("Failed to handle signaling command (%d)", err);
6526 			l2cap_sig_send_rej(conn, cmd->ident);
6527 		}
6528 
6529 		skb_pull(skb, len);
6530 	}
6531 
6532 	if (skb->len > 0) {
6533 		BT_DBG("corrupted command");
6534 		l2cap_sig_send_rej(conn, 0);
6535 	}
6536 
6537 drop:
6538 	kfree_skb(skb);
6539 }
6540 
6541 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6542 {
6543 	u16 our_fcs, rcv_fcs;
6544 	int hdr_size;
6545 
6546 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6547 		hdr_size = L2CAP_EXT_HDR_SIZE;
6548 	else
6549 		hdr_size = L2CAP_ENH_HDR_SIZE;
6550 
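	/* The FCS covers the L2CAP header and payload: strip the trailing
	 * FCS field and recompute the CRC-16 over everything before it.
	 */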
6551 	if (chan->fcs == L2CAP_FCS_CRC16) {
6552 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6553 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6554 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6555 
6556 		if (our_fcs != rcv_fcs)
6557 			return -EBADMSG;
6558 	}
6559 	return 0;
6560 }
6561 
6562 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6563 {
6564 	struct l2cap_ctrl control;
6565 
6566 	BT_DBG("chan %p", chan);
6567 
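	/* Acknowledge with the F bit set: send RNR while locally busy, try
	 * to piggyback the F bit on pending I-frames, and fall back to an
	 * RR frame if no I-frame carried it.
	 */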
6568 	memset(&control, 0, sizeof(control));
6569 	control.sframe = 1;
6570 	control.final = 1;
6571 	control.reqseq = chan->buffer_seq;
6572 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6573 
6574 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6575 		control.super = L2CAP_SUPER_RNR;
6576 		l2cap_send_sframe(chan, &control);
6577 	}
6578 
6579 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6580 	    chan->unacked_frames > 0)
6581 		__set_retrans_timer(chan);
6582 
6583 	/* Send pending iframes */
6584 	l2cap_ertm_send(chan);
6585 
6586 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6587 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6588 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6589 		 * send it now.
6590 		 */
6591 		control.super = L2CAP_SUPER_RR;
6592 		l2cap_send_sframe(chan, &control);
6593 	}
6594 }
6595 
6596 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6597 			    struct sk_buff **last_frag)
6598 {
6599 	/* skb->len reflects data in skb as well as all fragments
6600 	 * skb->data_len reflects only data in fragments
6601 	 */
6602 	if (!skb_has_frag_list(skb))
6603 		skb_shinfo(skb)->frag_list = new_frag;
6604 
6605 	new_frag->next = NULL;
6606 
6607 	(*last_frag)->next = new_frag;
6608 	*last_frag = new_frag;
6609 
6610 	skb->len += new_frag->len;
6611 	skb->data_len += new_frag->len;
6612 	skb->truesize += new_frag->truesize;
6613 }
6614 
6615 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6616 				struct l2cap_ctrl *control)
6617 {
6618 	int err = -EINVAL;
6619 
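	/* SAR reassembly: unsegmented frames are delivered directly, a START
	 * frame carries the total SDU length, and CONTINUE/END fragments are
	 * chained onto chan->sdu until the full SDU has been received.
	 */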
6620 	switch (control->sar) {
6621 	case L2CAP_SAR_UNSEGMENTED:
6622 		if (chan->sdu)
6623 			break;
6624 
6625 		err = chan->ops->recv(chan, skb);
6626 		break;
6627 
6628 	case L2CAP_SAR_START:
6629 		if (chan->sdu)
6630 			break;
6631 
6632 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6633 			break;
6634 
6635 		chan->sdu_len = get_unaligned_le16(skb->data);
6636 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6637 
6638 		if (chan->sdu_len > chan->imtu) {
6639 			err = -EMSGSIZE;
6640 			break;
6641 		}
6642 
6643 		if (skb->len >= chan->sdu_len)
6644 			break;
6645 
6646 		chan->sdu = skb;
6647 		chan->sdu_last_frag = skb;
6648 
6649 		skb = NULL;
6650 		err = 0;
6651 		break;
6652 
6653 	case L2CAP_SAR_CONTINUE:
6654 		if (!chan->sdu)
6655 			break;
6656 
6657 		append_skb_frag(chan->sdu, skb,
6658 				&chan->sdu_last_frag);
6659 		skb = NULL;
6660 
6661 		if (chan->sdu->len >= chan->sdu_len)
6662 			break;
6663 
6664 		err = 0;
6665 		break;
6666 
6667 	case L2CAP_SAR_END:
6668 		if (!chan->sdu)
6669 			break;
6670 
6671 		append_skb_frag(chan->sdu, skb,
6672 				&chan->sdu_last_frag);
6673 		skb = NULL;
6674 
6675 		if (chan->sdu->len != chan->sdu_len)
6676 			break;
6677 
6678 		err = chan->ops->recv(chan, chan->sdu);
6679 
6680 		if (!err) {
6681 			/* Reassembly complete */
6682 			chan->sdu = NULL;
6683 			chan->sdu_last_frag = NULL;
6684 			chan->sdu_len = 0;
6685 		}
6686 		break;
6687 	}
6688 
6689 	if (err) {
6690 		kfree_skb(skb);
6691 		kfree_skb(chan->sdu);
6692 		chan->sdu = NULL;
6693 		chan->sdu_last_frag = NULL;
6694 		chan->sdu_len = 0;
6695 	}
6696 
6697 	return err;
6698 }
6699 
6700 static int l2cap_resegment(struct l2cap_chan *chan)
6701 {
6702 	/* Placeholder */
6703 	return 0;
6704 }
6705 
6706 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6707 {
6708 	u8 event;
6709 
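	/* Local busy notifications only matter in ERTM mode, where they are
	 * fed into the transmit state machine as busy detected/clear events.
	 */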
6710 	if (chan->mode != L2CAP_MODE_ERTM)
6711 		return;
6712 
6713 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6714 	l2cap_tx(chan, NULL, NULL, event);
6715 }
6716 
6717 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6718 {
6719 	int err = 0;
6720 	/* Pass sequential frames to l2cap_reassemble_sdu()
6721 	 * until a gap is encountered.
6722 	 */
6723 
6724 	BT_DBG("chan %p", chan);
6725 
6726 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6727 		struct sk_buff *skb;
6728 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6729 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6730 
6731 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6732 
6733 		if (!skb)
6734 			break;
6735 
6736 		skb_unlink(skb, &chan->srej_q);
6737 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6738 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6739 		if (err)
6740 			break;
6741 	}
6742 
6743 	if (skb_queue_empty(&chan->srej_q)) {
6744 		chan->rx_state = L2CAP_RX_STATE_RECV;
6745 		l2cap_send_ack(chan);
6746 	}
6747 
6748 	return err;
6749 }
6750 
6751 static void l2cap_handle_srej(struct l2cap_chan *chan,
6752 			      struct l2cap_ctrl *control)
6753 {
6754 	struct sk_buff *skb;
6755 
6756 	BT_DBG("chan %p, control %p", chan, control);
6757 
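	/* An SREJ for next_tx_seq asks for retransmission of a frame we have
	 * not sent yet; that is a protocol violation by the peer.
	 */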
6758 	if (control->reqseq == chan->next_tx_seq) {
6759 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6760 		l2cap_send_disconn_req(chan, ECONNRESET);
6761 		return;
6762 	}
6763 
6764 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6765 
6766 	if (skb == NULL) {
6767 		BT_DBG("Seq %d not available for retransmission",
6768 		       control->reqseq);
6769 		return;
6770 	}
6771 
6772 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6773 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6774 		l2cap_send_disconn_req(chan, ECONNRESET);
6775 		return;
6776 	}
6777 
6778 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6779 
6780 	if (control->poll) {
6781 		l2cap_pass_to_tx(chan, control);
6782 
6783 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6784 		l2cap_retransmit(chan, control);
6785 		l2cap_ertm_send(chan);
6786 
6787 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6788 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6789 			chan->srej_save_reqseq = control->reqseq;
6790 		}
6791 	} else {
6792 		l2cap_pass_to_tx_fbit(chan, control);
6793 
6794 		if (control->final) {
6795 			if (chan->srej_save_reqseq != control->reqseq ||
6796 			    !test_and_clear_bit(CONN_SREJ_ACT,
6797 						&chan->conn_state))
6798 				l2cap_retransmit(chan, control);
6799 		} else {
6800 			l2cap_retransmit(chan, control);
6801 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6802 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6803 				chan->srej_save_reqseq = control->reqseq;
6804 			}
6805 		}
6806 	}
6807 }
6808 
6809 static void l2cap_handle_rej(struct l2cap_chan *chan,
6810 			     struct l2cap_ctrl *control)
6811 {
6812 	struct sk_buff *skb;
6813 
6814 	BT_DBG("chan %p, control %p", chan, control);
6815 
6816 	if (control->reqseq == chan->next_tx_seq) {
6817 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6818 		l2cap_send_disconn_req(chan, ECONNRESET);
6819 		return;
6820 	}
6821 
6822 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6823 
6824 	if (chan->max_tx && skb &&
6825 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6826 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6827 		l2cap_send_disconn_req(chan, ECONNRESET);
6828 		return;
6829 	}
6830 
6831 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6832 
6833 	l2cap_pass_to_tx(chan, control);
6834 
6835 	if (control->final) {
6836 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6837 			l2cap_retransmit_all(chan, control);
6838 	} else {
6839 		l2cap_retransmit_all(chan, control);
6840 		l2cap_ertm_send(chan);
6841 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6842 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6843 	}
6844 }
6845 
6846 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6847 {
6848 	BT_DBG("chan %p, txseq %d", chan, txseq);
6849 
6850 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6851 	       chan->expected_tx_seq);
6852 
6853 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6854 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6855 		    chan->tx_win) {
6856 			/* See notes below regarding "double poll" and
6857 			 * invalid packets.
6858 			 */
6859 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6860 				BT_DBG("Invalid/Ignore - after SREJ");
6861 				return L2CAP_TXSEQ_INVALID_IGNORE;
6862 			} else {
6863 				BT_DBG("Invalid - in window after SREJ sent");
6864 				return L2CAP_TXSEQ_INVALID;
6865 			}
6866 		}
6867 
6868 		if (chan->srej_list.head == txseq) {
6869 			BT_DBG("Expected SREJ");
6870 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6871 		}
6872 
6873 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6874 			BT_DBG("Duplicate SREJ - txseq already stored");
6875 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6876 		}
6877 
6878 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6879 			BT_DBG("Unexpected SREJ - not requested");
6880 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6881 		}
6882 	}
6883 
6884 	if (chan->expected_tx_seq == txseq) {
6885 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6886 		    chan->tx_win) {
6887 			BT_DBG("Invalid - txseq outside tx window");
6888 			return L2CAP_TXSEQ_INVALID;
6889 		} else {
6890 			BT_DBG("Expected");
6891 			return L2CAP_TXSEQ_EXPECTED;
6892 		}
6893 	}
6894 
6895 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6896 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6897 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6898 		return L2CAP_TXSEQ_DUPLICATE;
6899 	}
6900 
6901 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6902 		/* A source of invalid packets is a "double poll" condition,
6903 		 * where delays cause us to send multiple poll packets.  If
6904 		 * the remote stack receives and processes both polls,
6905 		 * sequence numbers can wrap around in such a way that a
6906 		 * resent frame has a sequence number that looks like new data
6907 		 * with a sequence gap.  This would trigger an erroneous SREJ
6908 		 * request.
6909 		 *
6910 		 * Fortunately, this is impossible with a tx window that's
6911 		 * less than half of the maximum sequence number, which allows
6912 		 * invalid frames to be safely ignored.
6913 		 *
6914 		 * With tx window sizes greater than half of the tx window
6915 		 * maximum, the frame is invalid and cannot be ignored.  This
6916 		 * causes a disconnect.
6917 		 */
6918 
6919 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6920 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6921 			return L2CAP_TXSEQ_INVALID_IGNORE;
6922 		} else {
6923 			BT_DBG("Invalid - txseq outside tx window");
6924 			return L2CAP_TXSEQ_INVALID;
6925 		}
6926 	} else {
6927 		BT_DBG("Unexpected - txseq indicates missing frames");
6928 		return L2CAP_TXSEQ_UNEXPECTED;
6929 	}
6930 }
6931 
6932 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6933 			       struct l2cap_ctrl *control,
6934 			       struct sk_buff *skb, u8 event)
6935 {
6936 	struct l2cap_ctrl local_control;
6937 	int err = 0;
6938 	bool skb_in_use = false;
6939 
6940 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6941 	       event);
6942 
6943 	switch (event) {
6944 	case L2CAP_EV_RECV_IFRAME:
6945 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6946 		case L2CAP_TXSEQ_EXPECTED:
6947 			l2cap_pass_to_tx(chan, control);
6948 
6949 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6950 				BT_DBG("Busy, discarding expected seq %d",
6951 				       control->txseq);
6952 				break;
6953 			}
6954 
6955 			chan->expected_tx_seq = __next_seq(chan,
6956 							   control->txseq);
6957 
6958 			chan->buffer_seq = chan->expected_tx_seq;
6959 			skb_in_use = true;
6960 
6961 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6962 			 * control, so make a copy in advance to use it after
6963 			 * l2cap_reassemble_sdu returns and to avoid the race
6964 			 * condition, for example:
6965 			 *
6966 			 * The current thread calls:
6967 			 *   l2cap_reassemble_sdu
6968 			 *     chan->ops->recv == l2cap_sock_recv_cb
6969 			 *       __sock_queue_rcv_skb
6970 			 * Another thread calls:
6971 			 *   bt_sock_recvmsg
6972 			 *     skb_recv_datagram
6973 			 *     skb_free_datagram
6974 			 * Then the current thread tries to access control, but
6975 			 * it was freed by skb_free_datagram.
6976 			 */
6977 			local_control = *control;
6978 			err = l2cap_reassemble_sdu(chan, skb, control);
6979 			if (err)
6980 				break;
6981 
6982 			if (local_control.final) {
6983 				if (!test_and_clear_bit(CONN_REJ_ACT,
6984 							&chan->conn_state)) {
6985 					local_control.final = 0;
6986 					l2cap_retransmit_all(chan, &local_control);
6987 					l2cap_ertm_send(chan);
6988 				}
6989 			}
6990 
6991 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6992 				l2cap_send_ack(chan);
6993 			break;
6994 		case L2CAP_TXSEQ_UNEXPECTED:
6995 			l2cap_pass_to_tx(chan, control);
6996 
6997 			/* Can't issue SREJ frames in the local busy state.
6998 			 * Drop this frame, it will be seen as missing
6999 			 * when local busy is exited.
7000 			 */
7001 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7002 				BT_DBG("Busy, discarding unexpected seq %d",
7003 				       control->txseq);
7004 				break;
7005 			}
7006 
7007 			/* There was a gap in the sequence, so an SREJ
7008 			 * must be sent for each missing frame.  The
7009 			 * current frame is stored for later use.
7010 			 */
7011 			skb_queue_tail(&chan->srej_q, skb);
7012 			skb_in_use = true;
7013 			BT_DBG("Queued %p (queue len %d)", skb,
7014 			       skb_queue_len(&chan->srej_q));
7015 
7016 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7017 			l2cap_seq_list_clear(&chan->srej_list);
7018 			l2cap_send_srej(chan, control->txseq);
7019 
7020 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7021 			break;
7022 		case L2CAP_TXSEQ_DUPLICATE:
7023 			l2cap_pass_to_tx(chan, control);
7024 			break;
7025 		case L2CAP_TXSEQ_INVALID_IGNORE:
7026 			break;
7027 		case L2CAP_TXSEQ_INVALID:
7028 		default:
7029 			l2cap_send_disconn_req(chan, ECONNRESET);
7030 			break;
7031 		}
7032 		break;
7033 	case L2CAP_EV_RECV_RR:
7034 		l2cap_pass_to_tx(chan, control);
7035 		if (control->final) {
7036 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7037 
7038 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7039 			    !__chan_is_moving(chan)) {
7040 				control->final = 0;
7041 				l2cap_retransmit_all(chan, control);
7042 			}
7043 
7044 			l2cap_ertm_send(chan);
7045 		} else if (control->poll) {
7046 			l2cap_send_i_or_rr_or_rnr(chan);
7047 		} else {
7048 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7049 					       &chan->conn_state) &&
7050 			    chan->unacked_frames)
7051 				__set_retrans_timer(chan);
7052 
7053 			l2cap_ertm_send(chan);
7054 		}
7055 		break;
7056 	case L2CAP_EV_RECV_RNR:
7057 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7058 		l2cap_pass_to_tx(chan, control);
7059 		if (control && control->poll) {
7060 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7061 			l2cap_send_rr_or_rnr(chan, 0);
7062 		}
7063 		__clear_retrans_timer(chan);
7064 		l2cap_seq_list_clear(&chan->retrans_list);
7065 		break;
7066 	case L2CAP_EV_RECV_REJ:
7067 		l2cap_handle_rej(chan, control);
7068 		break;
7069 	case L2CAP_EV_RECV_SREJ:
7070 		l2cap_handle_srej(chan, control);
7071 		break;
7072 	default:
7073 		break;
7074 	}
7075 
7076 	if (skb && !skb_in_use) {
7077 		BT_DBG("Freeing %p", skb);
7078 		kfree_skb(skb);
7079 	}
7080 
7081 	return err;
7082 }
7083 
7084 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7085 				    struct l2cap_ctrl *control,
7086 				    struct sk_buff *skb, u8 event)
7087 {
7088 	int err = 0;
7089 	u16 txseq = control->txseq;
7090 	bool skb_in_use = false;
7091 
7092 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7093 	       event);
7094 
7095 	switch (event) {
7096 	case L2CAP_EV_RECV_IFRAME:
7097 		switch (l2cap_classify_txseq(chan, txseq)) {
7098 		case L2CAP_TXSEQ_EXPECTED:
7099 			/* Keep frame for reassembly later */
7100 			l2cap_pass_to_tx(chan, control);
7101 			skb_queue_tail(&chan->srej_q, skb);
7102 			skb_in_use = true;
7103 			BT_DBG("Queued %p (queue len %d)", skb,
7104 			       skb_queue_len(&chan->srej_q));
7105 
7106 			chan->expected_tx_seq = __next_seq(chan, txseq);
7107 			break;
7108 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7109 			l2cap_seq_list_pop(&chan->srej_list);
7110 
7111 			l2cap_pass_to_tx(chan, control);
7112 			skb_queue_tail(&chan->srej_q, skb);
7113 			skb_in_use = true;
7114 			BT_DBG("Queued %p (queue len %d)", skb,
7115 			       skb_queue_len(&chan->srej_q));
7116 
7117 			err = l2cap_rx_queued_iframes(chan);
7118 			if (err)
7119 				break;
7120 
7121 			break;
7122 		case L2CAP_TXSEQ_UNEXPECTED:
7123 			/* Got a frame that can't be reassembled yet.
7124 			 * Save it for later, and send SREJs to cover
7125 			 * the missing frames.
7126 			 */
7127 			skb_queue_tail(&chan->srej_q, skb);
7128 			skb_in_use = true;
7129 			BT_DBG("Queued %p (queue len %d)", skb,
7130 			       skb_queue_len(&chan->srej_q));
7131 
7132 			l2cap_pass_to_tx(chan, control);
7133 			l2cap_send_srej(chan, control->txseq);
7134 			break;
7135 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7136 			/* This frame was requested with an SREJ, but
7137 			 * some expected retransmitted frames are
7138 			 * missing.  Request retransmission of missing
7139 			 * SREJ'd frames.
7140 			 */
7141 			skb_queue_tail(&chan->srej_q, skb);
7142 			skb_in_use = true;
7143 			BT_DBG("Queued %p (queue len %d)", skb,
7144 			       skb_queue_len(&chan->srej_q));
7145 
7146 			l2cap_pass_to_tx(chan, control);
7147 			l2cap_send_srej_list(chan, control->txseq);
7148 			break;
7149 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7150 			/* We've already queued this frame.  Drop this copy. */
7151 			l2cap_pass_to_tx(chan, control);
7152 			break;
7153 		case L2CAP_TXSEQ_DUPLICATE:
7154 			/* Expecting a later sequence number, so this frame
7155 			 * was already received.  Ignore it completely.
7156 			 */
7157 			break;
7158 		case L2CAP_TXSEQ_INVALID_IGNORE:
7159 			break;
7160 		case L2CAP_TXSEQ_INVALID:
7161 		default:
7162 			l2cap_send_disconn_req(chan, ECONNRESET);
7163 			break;
7164 		}
7165 		break;
7166 	case L2CAP_EV_RECV_RR:
7167 		l2cap_pass_to_tx(chan, control);
7168 		if (control->final) {
7169 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7170 
7171 			if (!test_and_clear_bit(CONN_REJ_ACT,
7172 						&chan->conn_state)) {
7173 				control->final = 0;
7174 				l2cap_retransmit_all(chan, control);
7175 			}
7176 
7177 			l2cap_ertm_send(chan);
7178 		} else if (control->poll) {
7179 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7180 					       &chan->conn_state) &&
7181 			    chan->unacked_frames) {
7182 				__set_retrans_timer(chan);
7183 			}
7184 
7185 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7186 			l2cap_send_srej_tail(chan);
7187 		} else {
7188 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7189 					       &chan->conn_state) &&
7190 			    chan->unacked_frames)
7191 				__set_retrans_timer(chan);
7192 
7193 			l2cap_send_ack(chan);
7194 		}
7195 		break;
7196 	case L2CAP_EV_RECV_RNR:
7197 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7198 		l2cap_pass_to_tx(chan, control);
7199 		if (control->poll) {
7200 			l2cap_send_srej_tail(chan);
7201 		} else {
7202 			struct l2cap_ctrl rr_control;
7203 			memset(&rr_control, 0, sizeof(rr_control));
7204 			rr_control.sframe = 1;
7205 			rr_control.super = L2CAP_SUPER_RR;
7206 			rr_control.reqseq = chan->buffer_seq;
7207 			l2cap_send_sframe(chan, &rr_control);
7208 		}
7209 
7210 		break;
7211 	case L2CAP_EV_RECV_REJ:
7212 		l2cap_handle_rej(chan, control);
7213 		break;
7214 	case L2CAP_EV_RECV_SREJ:
7215 		l2cap_handle_srej(chan, control);
7216 		break;
7217 	}
7218 
7219 	if (skb && !skb_in_use) {
7220 		BT_DBG("Freeing %p", skb);
7221 		kfree_skb(skb);
7222 	}
7223 
7224 	return err;
7225 }
7226 
7227 static int l2cap_finish_move(struct l2cap_chan *chan)
7228 {
7229 	BT_DBG("chan %p", chan);
7230 
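	/* After the move the outgoing MTU must match the new controller:
	 * use the AMP block MTU when running over a high speed link,
	 * otherwise the BR/EDR ACL MTU.
	 */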
7231 	chan->rx_state = L2CAP_RX_STATE_RECV;
7232 
7233 	if (chan->hs_hcon)
7234 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7235 	else
7236 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7237 
7238 	return l2cap_resegment(chan);
7239 }
7240 
7241 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7242 				 struct l2cap_ctrl *control,
7243 				 struct sk_buff *skb, u8 event)
7244 {
7245 	int err;
7246 
7247 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7248 	       event);
7249 
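	/* While waiting for the peer's poll after a channel move, only a
	 * frame with the P bit set lets us resynchronize and resume.
	 */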
7250 	if (!control->poll)
7251 		return -EPROTO;
7252 
7253 	l2cap_process_reqseq(chan, control->reqseq);
7254 
7255 	if (!skb_queue_empty(&chan->tx_q))
7256 		chan->tx_send_head = skb_peek(&chan->tx_q);
7257 	else
7258 		chan->tx_send_head = NULL;
7259 
7260 	/* Rewind next_tx_seq to the point expected
7261 	 * by the receiver.
7262 	 */
7263 	chan->next_tx_seq = control->reqseq;
7264 	chan->unacked_frames = 0;
7265 
7266 	err = l2cap_finish_move(chan);
7267 	if (err)
7268 		return err;
7269 
7270 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7271 	l2cap_send_i_or_rr_or_rnr(chan);
7272 
7273 	if (event == L2CAP_EV_RECV_IFRAME)
7274 		return -EPROTO;
7275 
7276 	return l2cap_rx_state_recv(chan, control, NULL, event);
7277 }
7278 
7279 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7280 				 struct l2cap_ctrl *control,
7281 				 struct sk_buff *skb, u8 event)
7282 {
7283 	int err;
7284 
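	/* Counterpart of WAIT_P: wait for the peer's final (F=1) frame,
	 * then resynchronize the transmit state and resume normal reception.
	 */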
7285 	if (!control->final)
7286 		return -EPROTO;
7287 
7288 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7289 
7290 	chan->rx_state = L2CAP_RX_STATE_RECV;
7291 	l2cap_process_reqseq(chan, control->reqseq);
7292 
7293 	if (!skb_queue_empty(&chan->tx_q))
7294 		chan->tx_send_head = skb_peek(&chan->tx_q);
7295 	else
7296 		chan->tx_send_head = NULL;
7297 
7298 	/* Rewind next_tx_seq to the point expected
7299 	 * by the receiver.
7300 	 */
7301 	chan->next_tx_seq = control->reqseq;
7302 	chan->unacked_frames = 0;
7303 
7304 	if (chan->hs_hcon)
7305 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7306 	else
7307 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7308 
7309 	err = l2cap_resegment(chan);
7310 
7311 	if (!err)
7312 		err = l2cap_rx_state_recv(chan, control, skb, event);
7313 
7314 	return err;
7315 }
7316 
7317 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7318 {
7319 	/* Make sure reqseq is for a packet that has been sent but not acked */
7320 	u16 unacked;
7321 
7322 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7323 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7324 }
7325 
7326 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7327 		    struct sk_buff *skb, u8 event)
7328 {
7329 	int err = 0;
7330 
7331 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7332 	       control, skb, event, chan->rx_state);
7333 
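	/* Only process frames whose reqseq acknowledges something we have
	 * actually sent; otherwise the peer is out of sync and the channel
	 * is torn down.
	 */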
7334 	if (__valid_reqseq(chan, control->reqseq)) {
7335 		switch (chan->rx_state) {
7336 		case L2CAP_RX_STATE_RECV:
7337 			err = l2cap_rx_state_recv(chan, control, skb, event);
7338 			break;
7339 		case L2CAP_RX_STATE_SREJ_SENT:
7340 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7341 						       event);
7342 			break;
7343 		case L2CAP_RX_STATE_WAIT_P:
7344 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7345 			break;
7346 		case L2CAP_RX_STATE_WAIT_F:
7347 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7348 			break;
7349 		default:
7350 			/* shut it down */
7351 			break;
7352 		}
7353 	} else {
7354 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7355 		       control->reqseq, chan->next_tx_seq,
7356 		       chan->expected_ack_seq);
7357 		l2cap_send_disconn_req(chan, ECONNRESET);
7358 	}
7359 
7360 	return err;
7361 }
7362 
7363 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7364 			   struct sk_buff *skb)
7365 {
7366 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7367 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7368 	 * returns and to avoid the race condition, for example:
7369 	 *
7370 	 * The current thread calls:
7371 	 *   l2cap_reassemble_sdu
7372 	 *     chan->ops->recv == l2cap_sock_recv_cb
7373 	 *       __sock_queue_rcv_skb
7374 	 * Another thread calls:
7375 	 *   bt_sock_recvmsg
7376 	 *     skb_recv_datagram
7377 	 *     skb_free_datagram
7378 	 * Then the current thread tries to access control, but it was freed by
7379 	 * skb_free_datagram.
7380 	 */
7381 	u16 txseq = control->txseq;
7382 
7383 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7384 	       chan->rx_state);
7385 
7386 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7387 		l2cap_pass_to_tx(chan, control);
7388 
7389 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7390 		       __next_seq(chan, chan->buffer_seq));
7391 
7392 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7393 
7394 		l2cap_reassemble_sdu(chan, skb, control);
7395 	} else {
7396 		if (chan->sdu) {
7397 			kfree_skb(chan->sdu);
7398 			chan->sdu = NULL;
7399 		}
7400 		chan->sdu_last_frag = NULL;
7401 		chan->sdu_len = 0;
7402 
7403 		if (skb) {
7404 			BT_DBG("Freeing %p", skb);
7405 			kfree_skb(skb);
7406 		}
7407 	}
7408 
7409 	chan->last_acked_seq = txseq;
7410 	chan->expected_tx_seq = __next_seq(chan, txseq);
7411 
7412 	return 0;
7413 }
7414 
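/* Receive path for ERTM and streaming mode: verify the FCS, enforce the
 * negotiated MPS and validate the F/P bits, then feed I-frames to
 * l2cap_rx()/l2cap_stream_rx() and map S-frames onto the corresponding
 * RR/REJ/RNR/SREJ receive events.
 */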
7415 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7416 {
7417 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7418 	u16 len;
7419 	u8 event;
7420 
7421 	__unpack_control(chan, skb);
7422 
7423 	len = skb->len;
7424 
7425 	/*
7426 	 * We can just drop the corrupted I-frame here.
7427 	 * The receiver will notice the missing frame and start the normal
7428 	 * recovery procedure, asking for retransmission.
7429 	 */
7430 	if (l2cap_check_fcs(chan, skb))
7431 		goto drop;
7432 
7433 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7434 		len -= L2CAP_SDULEN_SIZE;
7435 
7436 	if (chan->fcs == L2CAP_FCS_CRC16)
7437 		len -= L2CAP_FCS_SIZE;
7438 
7439 	if (len > chan->mps) {
7440 		l2cap_send_disconn_req(chan, ECONNRESET);
7441 		goto drop;
7442 	}
7443 
7444 	if (chan->ops->filter) {
7445 		if (chan->ops->filter(chan, skb))
7446 			goto drop;
7447 	}
7448 
7449 	if (!control->sframe) {
7450 		int err;
7451 
7452 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7453 		       control->sar, control->reqseq, control->final,
7454 		       control->txseq);
7455 
7456 		/* Validate F-bit - F=0 always valid, F=1 only
7457 		 * valid in TX WAIT_F
7458 		 */
7459 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7460 			goto drop;
7461 
7462 		if (chan->mode != L2CAP_MODE_STREAMING) {
7463 			event = L2CAP_EV_RECV_IFRAME;
7464 			err = l2cap_rx(chan, control, skb, event);
7465 		} else {
7466 			err = l2cap_stream_rx(chan, control, skb);
7467 		}
7468 
7469 		if (err)
7470 			l2cap_send_disconn_req(chan, ECONNRESET);
7471 	} else {
7472 		const u8 rx_func_to_event[4] = {
7473 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7474 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7475 		};
7476 
7477 		/* Only I-frames are expected in streaming mode */
7478 		if (chan->mode == L2CAP_MODE_STREAMING)
7479 			goto drop;
7480 
7481 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7482 		       control->reqseq, control->final, control->poll,
7483 		       control->super);
7484 
7485 		if (len != 0) {
7486 			BT_ERR("Trailing bytes: %d in sframe", len);
7487 			l2cap_send_disconn_req(chan, ECONNRESET);
7488 			goto drop;
7489 		}
7490 
7491 		/* Validate F and P bits */
7492 		if (control->final && (control->poll ||
7493 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7494 			goto drop;
7495 
7496 		event = rx_func_to_event[control->super];
7497 		if (l2cap_rx(chan, control, skb, event))
7498 			l2cap_send_disconn_req(chan, ECONNRESET);
7499 	}
7500 
7501 	return 0;
7502 
7503 drop:
7504 	kfree_skb(skb);
7505 	return 0;
7506 }
7507 
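/* Return receive credits to the remote side. The target is enough
 * credits for one full SDU (imtu / mps + 1 PDUs); if rx_credits has
 * dropped below that, send an L2CAP_LE_CREDITS packet for the
 * difference.
 */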
7508 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7509 {
7510 	struct l2cap_conn *conn = chan->conn;
7511 	struct l2cap_le_credits pkt;
7512 	u16 return_credits;
7513 
7514 	return_credits = (chan->imtu / chan->mps) + 1;
7515 
7516 	if (chan->rx_credits >= return_credits)
7517 		return;
7518 
7519 	return_credits -= chan->rx_credits;
7520 
7521 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7522 
7523 	chan->rx_credits += return_credits;
7524 
7525 	pkt.cid     = cpu_to_le16(chan->scid);
7526 	pkt.credits = cpu_to_le16(return_credits);
7527 
7528 	chan->ident = l2cap_get_ident(conn);
7529 
7530 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7531 }
7532 
7533 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7534 {
7535 	int err;
7536 
7537 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7538 
7539 	/* Wait for recv to confirm reception before updating the credits */
7540 	err = chan->ops->recv(chan, skb);
7541 
7542 	/* Update credits whenever an SDU is received */
7543 	l2cap_chan_le_send_credits(chan);
7544 
7545 	return err;
7546 }
7547 
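/* Credit-based flow control receive (LE and enhanced modes): every PDU
 * consumes one credit, and the first PDU of an SDU carries a 16-bit SDU
 * length that drives reassembly until the complete SDU can be passed up
 * through l2cap_ecred_recv().
 */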
7548 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7549 {
7550 	int err;
7551 
7552 	if (!chan->rx_credits) {
7553 		BT_ERR("No credits to receive LE L2CAP data");
7554 		l2cap_send_disconn_req(chan, ECONNRESET);
7555 		return -ENOBUFS;
7556 	}
7557 
7558 	if (chan->imtu < skb->len) {
7559 		BT_ERR("Too big LE L2CAP PDU");
7560 		return -ENOBUFS;
7561 	}
7562 
7563 	chan->rx_credits--;
7564 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7565 
7566 	/* Replenish credits if the remote has run out; this should only happen
7567 	 * if the remote is not using the entire MPS.
7568 	 */
7569 	if (!chan->rx_credits)
7570 		l2cap_chan_le_send_credits(chan);
7571 
7572 	err = 0;
7573 
7574 	if (!chan->sdu) {
7575 		u16 sdu_len;
7576 
7577 		sdu_len = get_unaligned_le16(skb->data);
7578 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7579 
7580 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7581 		       sdu_len, skb->len, chan->imtu);
7582 
7583 		if (sdu_len > chan->imtu) {
7584 			BT_ERR("Too big LE L2CAP SDU length received");
7585 			err = -EMSGSIZE;
7586 			goto failed;
7587 		}
7588 
7589 		if (skb->len > sdu_len) {
7590 			BT_ERR("Too much LE L2CAP data received");
7591 			err = -EINVAL;
7592 			goto failed;
7593 		}
7594 
7595 		if (skb->len == sdu_len)
7596 			return l2cap_ecred_recv(chan, skb);
7597 
7598 		chan->sdu = skb;
7599 		chan->sdu_len = sdu_len;
7600 		chan->sdu_last_frag = skb;
7601 
7602 		/* Detect if remote is not able to use the selected MPS */
7603 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7604 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7605 
7606 			/* Adjust the number of credits */
7607 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7608 			chan->mps = mps_len;
7609 			l2cap_chan_le_send_credits(chan);
7610 		}
7611 
7612 		return 0;
7613 	}
7614 
7615 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7616 	       chan->sdu->len, skb->len, chan->sdu_len);
7617 
7618 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7619 		BT_ERR("Too much LE L2CAP data received");
7620 		err = -EINVAL;
7621 		goto failed;
7622 	}
7623 
7624 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7625 	skb = NULL;
7626 
7627 	if (chan->sdu->len == chan->sdu_len) {
7628 		err = l2cap_ecred_recv(chan, chan->sdu);
7629 		if (!err) {
7630 			chan->sdu = NULL;
7631 			chan->sdu_last_frag = NULL;
7632 			chan->sdu_len = 0;
7633 		}
7634 	}
7635 
7636 failed:
7637 	if (err) {
7638 		kfree_skb(skb);
7639 		kfree_skb(chan->sdu);
7640 		chan->sdu = NULL;
7641 		chan->sdu_last_frag = NULL;
7642 		chan->sdu_len = 0;
7643 	}
7644 
7645 	/* We can't return an error here since we took care of the skb
7646 	 * freeing internally. An error return would cause the caller to
7647 	 * do a double-free of the skb.
7648 	 */
7649 	return 0;
7650 }
7651 
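/* Deliver a frame received on a dynamically allocated CID to the owning
 * channel (creating the A2MP channel on demand) and run the
 * mode-specific receive handler.
 */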
7652 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7653 			       struct sk_buff *skb)
7654 {
7655 	struct l2cap_chan *chan;
7656 
7657 	chan = l2cap_get_chan_by_scid(conn, cid);
7658 	if (!chan) {
7659 		if (cid == L2CAP_CID_A2MP) {
7660 			chan = a2mp_channel_create(conn, skb);
7661 			if (!chan) {
7662 				kfree_skb(skb);
7663 				return;
7664 			}
7665 
7666 			l2cap_chan_hold(chan);
7667 			l2cap_chan_lock(chan);
7668 		} else {
7669 			BT_DBG("unknown cid 0x%4.4x", cid);
7670 			/* Drop packet and return */
7671 			kfree_skb(skb);
7672 			return;
7673 		}
7674 	}
7675 
7676 	BT_DBG("chan %p, len %d", chan, skb->len);
7677 
7678 	/* If we receive data on a fixed channel before the info req/rsp
7679 	 * procedure is done, simply assume that the channel is supported
7680 	 * and mark it as ready.
7681 	 */
7682 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7683 		l2cap_chan_ready(chan);
7684 
7685 	if (chan->state != BT_CONNECTED)
7686 		goto drop;
7687 
7688 	switch (chan->mode) {
7689 	case L2CAP_MODE_LE_FLOWCTL:
7690 	case L2CAP_MODE_EXT_FLOWCTL:
7691 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7692 			goto drop;
7693 
7694 		goto done;
7695 
7696 	case L2CAP_MODE_BASIC:
7697 		/* If the socket receive buffer overflows we drop data here,
7698 		 * which is *bad* because L2CAP has to be reliable.
7699 		 * But we don't have any other choice; L2CAP doesn't
7700 		 * provide a flow control mechanism in basic mode. */
7701 
7702 		if (chan->imtu < skb->len) {
7703 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7704 			goto drop;
7705 		}
7706 
7707 		if (!chan->ops->recv(chan, skb))
7708 			goto done;
7709 		break;
7710 
7711 	case L2CAP_MODE_ERTM:
7712 	case L2CAP_MODE_STREAMING:
7713 		l2cap_data_rcv(chan, skb);
7714 		goto done;
7715 
7716 	default:
7717 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7718 		break;
7719 	}
7720 
7721 drop:
7722 	kfree_skb(skb);
7723 
7724 done:
7725 	l2cap_chan_unlock(chan);
7726 	l2cap_chan_put(chan);
7727 }
7728 
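/* Connectionless (G-frame) reception: deliver the frame to the global
 * channel bound to the given PSM. Only BR/EDR links carry
 * connectionless data.
 */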
7729 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7730 				  struct sk_buff *skb)
7731 {
7732 	struct hci_conn *hcon = conn->hcon;
7733 	struct l2cap_chan *chan;
7734 
7735 	if (hcon->type != ACL_LINK)
7736 		goto free_skb;
7737 
7738 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7739 					ACL_LINK);
7740 	if (!chan)
7741 		goto free_skb;
7742 
7743 	BT_DBG("chan %p, len %d", chan, skb->len);
7744 
7745 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7746 		goto drop;
7747 
7748 	if (chan->imtu < skb->len)
7749 		goto drop;
7750 
7751 	/* Store remote BD_ADDR and PSM for msg_name */
7752 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7753 	bt_cb(skb)->l2cap.psm = psm;
7754 
7755 	if (!chan->ops->recv(chan, skb)) {
7756 		l2cap_chan_put(chan);
7757 		return;
7758 	}
7759 
7760 drop:
7761 	l2cap_chan_put(chan);
7762 free_skb:
7763 	kfree_skb(skb);
7764 }
7765 
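/* Parse the basic L2CAP header of a complete frame and demultiplex it
 * by CID to the signaling, connectionless or data channel handlers.
 * Frames arriving before the HCI connection is fully established are
 * queued and later replayed by process_pending_rx().
 */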
7766 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7767 {
7768 	struct l2cap_hdr *lh = (void *) skb->data;
7769 	struct hci_conn *hcon = conn->hcon;
7770 	u16 cid, len;
7771 	__le16 psm;
7772 
7773 	if (hcon->state != BT_CONNECTED) {
7774 		BT_DBG("queueing pending rx skb");
7775 		skb_queue_tail(&conn->pending_rx, skb);
7776 		return;
7777 	}
7778 
7779 	skb_pull(skb, L2CAP_HDR_SIZE);
7780 	cid = __le16_to_cpu(lh->cid);
7781 	len = __le16_to_cpu(lh->len);
7782 
7783 	if (len != skb->len) {
7784 		kfree_skb(skb);
7785 		return;
7786 	}
7787 
7788 	/* Since we can't actively block incoming LE connections, we must
7789 	 * at least ensure that we ignore incoming data from them.
7790 	 */
7791 	if (hcon->type == LE_LINK &&
7792 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7793 				   bdaddr_dst_type(hcon))) {
7794 		kfree_skb(skb);
7795 		return;
7796 	}
7797 
7798 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7799 
7800 	switch (cid) {
7801 	case L2CAP_CID_SIGNALING:
7802 		l2cap_sig_channel(conn, skb);
7803 		break;
7804 
7805 	case L2CAP_CID_CONN_LESS:
7806 		psm = get_unaligned((__le16 *) skb->data);
7807 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7808 		l2cap_conless_channel(conn, psm, skb);
7809 		break;
7810 
7811 	case L2CAP_CID_LE_SIGNALING:
7812 		l2cap_le_sig_channel(conn, skb);
7813 		break;
7814 
7815 	default:
7816 		l2cap_data_channel(conn, cid, skb);
7817 		break;
7818 	}
7819 }
7820 
7821 static void process_pending_rx(struct work_struct *work)
7822 {
7823 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7824 					       pending_rx_work);
7825 	struct sk_buff *skb;
7826 
7827 	BT_DBG("");
7828 
7829 	while ((skb = skb_dequeue(&conn->pending_rx)))
7830 		l2cap_recv_frame(conn, skb);
7831 }
7832 
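/* Get or create the L2CAP connection object for an HCI connection:
 * allocate the HCI channel, pick the MTU reported by the controller and
 * initialize the fixed channel mask, locks, queues and work items.
 */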
7833 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7834 {
7835 	struct l2cap_conn *conn = hcon->l2cap_data;
7836 	struct hci_chan *hchan;
7837 
7838 	if (conn)
7839 		return conn;
7840 
7841 	hchan = hci_chan_create(hcon);
7842 	if (!hchan)
7843 		return NULL;
7844 
7845 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7846 	if (!conn) {
7847 		hci_chan_del(hchan);
7848 		return NULL;
7849 	}
7850 
7851 	kref_init(&conn->ref);
7852 	hcon->l2cap_data = conn;
7853 	conn->hcon = hci_conn_get(hcon);
7854 	conn->hchan = hchan;
7855 
7856 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7857 
7858 	switch (hcon->type) {
7859 	case LE_LINK:
7860 		if (hcon->hdev->le_mtu) {
7861 			conn->mtu = hcon->hdev->le_mtu;
7862 			break;
7863 		}
7864 		fallthrough;
7865 	default:
7866 		conn->mtu = hcon->hdev->acl_mtu;
7867 		break;
7868 	}
7869 
7870 	conn->feat_mask = 0;
7871 
7872 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7873 
7874 	if (hcon->type == ACL_LINK &&
7875 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7876 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7877 
7878 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7879 	    (bredr_sc_enabled(hcon->hdev) ||
7880 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7881 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7882 
7883 	mutex_init(&conn->ident_lock);
7884 	mutex_init(&conn->chan_lock);
7885 
7886 	INIT_LIST_HEAD(&conn->chan_l);
7887 	INIT_LIST_HEAD(&conn->users);
7888 
7889 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7890 
7891 	skb_queue_head_init(&conn->pending_rx);
7892 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7893 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7894 
7895 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7896 
7897 	return conn;
7898 }
7899 
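/* A PSM of zero is never valid. LE PSMs occupy the range 0x0001-0x00ff;
 * BR/EDR PSMs must have an odd least significant octet and an even most
 * significant octet, as checked below.
 */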
7900 static bool is_valid_psm(u16 psm, u8 dst_type) {
7901 	if (!psm)
7902 		return false;
7903 
7904 	if (bdaddr_type_is_le(dst_type))
7905 		return (psm <= 0x00ff);
7906 
7907 	/* PSM must be odd and lsb of upper byte must be 0 */
7908 	return ((psm & 0x0101) == 0x0001);
7909 }
7910 
7911 struct l2cap_chan_data {
7912 	struct l2cap_chan *chan;
7913 	struct pid *pid;
7914 	int count;
7915 };
7916 
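/* Iterator callback used by l2cap_chan_connect(): count other deferred
 * L2CAP_MODE_EXT_FLOWCTL channels that the same process is connecting
 * to the same PSM, so the caller can cap the total at
 * L2CAP_ECRED_CONN_SCID_MAX.
 */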
7917 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7918 {
7919 	struct l2cap_chan_data *d = data;
7920 	struct pid *pid;
7921 
7922 	if (chan == d->chan)
7923 		return;
7924 
7925 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7926 		return;
7927 
7928 	pid = chan->ops->get_peer_pid(chan);
7929 
7930 	/* Only count deferred channels with the same PID/PSM */
7931 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7932 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7933 		return;
7934 
7935 	d->count++;
7936 }
7937 
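/* Initiate an outgoing connection on a channel: validate the PSM, CID
 * and mode, create or reuse the underlying ACL or LE link, attach the
 * channel to the connection and start the L2CAP connection procedure
 * once the link is up.
 */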
7938 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7939 		       bdaddr_t *dst, u8 dst_type)
7940 {
7941 	struct l2cap_conn *conn;
7942 	struct hci_conn *hcon;
7943 	struct hci_dev *hdev;
7944 	int err;
7945 
7946 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7947 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7948 
7949 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7950 	if (!hdev)
7951 		return -EHOSTUNREACH;
7952 
7953 	hci_dev_lock(hdev);
7954 
7955 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7956 	    chan->chan_type != L2CAP_CHAN_RAW) {
7957 		err = -EINVAL;
7958 		goto done;
7959 	}
7960 
7961 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7962 		err = -EINVAL;
7963 		goto done;
7964 	}
7965 
7966 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7967 		err = -EINVAL;
7968 		goto done;
7969 	}
7970 
7971 	switch (chan->mode) {
7972 	case L2CAP_MODE_BASIC:
7973 		break;
7974 	case L2CAP_MODE_LE_FLOWCTL:
7975 		break;
7976 	case L2CAP_MODE_EXT_FLOWCTL:
7977 		if (!enable_ecred) {
7978 			err = -EOPNOTSUPP;
7979 			goto done;
7980 		}
7981 		break;
7982 	case L2CAP_MODE_ERTM:
7983 	case L2CAP_MODE_STREAMING:
7984 		if (!disable_ertm)
7985 			break;
7986 		fallthrough;
7987 	default:
7988 		err = -EOPNOTSUPP;
7989 		goto done;
7990 	}
7991 
7992 	switch (chan->state) {
7993 	case BT_CONNECT:
7994 	case BT_CONNECT2:
7995 	case BT_CONFIG:
7996 		/* Already connecting */
7997 		err = 0;
7998 		goto done;
7999 
8000 	case BT_CONNECTED:
8001 		/* Already connected */
8002 		err = -EISCONN;
8003 		goto done;
8004 
8005 	case BT_OPEN:
8006 	case BT_BOUND:
8007 		/* Can connect */
8008 		break;
8009 
8010 	default:
8011 		err = -EBADFD;
8012 		goto done;
8013 	}
8014 
8015 	/* Set destination address and psm */
8016 	bacpy(&chan->dst, dst);
8017 	chan->dst_type = dst_type;
8018 
8019 	chan->psm = psm;
8020 	chan->dcid = cid;
8021 
8022 	if (bdaddr_type_is_le(dst_type)) {
8023 		/* Convert from L2CAP channel address type to HCI address type
8024 		 */
8025 		if (dst_type == BDADDR_LE_PUBLIC)
8026 			dst_type = ADDR_LE_DEV_PUBLIC;
8027 		else
8028 			dst_type = ADDR_LE_DEV_RANDOM;
8029 
8030 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8031 			hcon = hci_connect_le(hdev, dst, dst_type,
8032 					      chan->sec_level,
8033 					      HCI_LE_CONN_TIMEOUT,
8034 					      HCI_ROLE_SLAVE, NULL);
8035 		else
8036 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8037 						   chan->sec_level,
8038 						   HCI_LE_CONN_TIMEOUT,
8039 						   CONN_REASON_L2CAP_CHAN);
8040 
8041 	} else {
8042 		u8 auth_type = l2cap_get_auth_type(chan);
8043 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8044 				       CONN_REASON_L2CAP_CHAN);
8045 	}
8046 
8047 	if (IS_ERR(hcon)) {
8048 		err = PTR_ERR(hcon);
8049 		goto done;
8050 	}
8051 
8052 	conn = l2cap_conn_add(hcon);
8053 	if (!conn) {
8054 		hci_conn_drop(hcon);
8055 		err = -ENOMEM;
8056 		goto done;
8057 	}
8058 
8059 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8060 		struct l2cap_chan_data data;
8061 
8062 		data.chan = chan;
8063 		data.pid = chan->ops->get_peer_pid(chan);
8064 		data.count = 1;
8065 
8066 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8067 
8068 		/* Check that there aren't too many channels being connected */
8069 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8070 			hci_conn_drop(hcon);
8071 			err = -EPROTO;
8072 			goto done;
8073 		}
8074 	}
8075 
8076 	mutex_lock(&conn->chan_lock);
8077 	l2cap_chan_lock(chan);
8078 
8079 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8080 		hci_conn_drop(hcon);
8081 		err = -EBUSY;
8082 		goto chan_unlock;
8083 	}
8084 
8085 	/* Update source addr of the socket */
8086 	bacpy(&chan->src, &hcon->src);
8087 	chan->src_type = bdaddr_src_type(hcon);
8088 
8089 	__l2cap_chan_add(conn, chan);
8090 
8091 	/* l2cap_chan_add takes its own ref so we can drop this one */
8092 	hci_conn_drop(hcon);
8093 
8094 	l2cap_state_change(chan, BT_CONNECT);
8095 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8096 
8097 	/* Release chan->sport so that it can be reused by other
8098 	 * sockets (as it's only used for listening sockets).
8099 	 */
8100 	write_lock(&chan_list_lock);
8101 	chan->sport = 0;
8102 	write_unlock(&chan_list_lock);
8103 
8104 	if (hcon->state == BT_CONNECTED) {
8105 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8106 			__clear_chan_timer(chan);
8107 			if (l2cap_chan_check_security(chan, true))
8108 				l2cap_state_change(chan, BT_CONNECTED);
8109 		} else
8110 			l2cap_do_start(chan);
8111 	}
8112 
8113 	err = 0;
8114 
8115 chan_unlock:
8116 	l2cap_chan_unlock(chan);
8117 	mutex_unlock(&conn->chan_lock);
8118 done:
8119 	hci_dev_unlock(hdev);
8120 	hci_dev_put(hdev);
8121 	return err;
8122 }
8123 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8124 
8125 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8126 {
8127 	struct l2cap_conn *conn = chan->conn;
8128 	struct {
8129 		struct l2cap_ecred_reconf_req req;
8130 		__le16 scid;
8131 	} pdu;
8132 
8133 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8134 	pdu.req.mps = cpu_to_le16(chan->mps);
8135 	pdu.scid    = cpu_to_le16(chan->scid);
8136 
8137 	chan->ident = l2cap_get_ident(conn);
8138 
8139 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8140 		       sizeof(pdu), &pdu);
8141 }
8142 
8143 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8144 {
8145 	if (chan->imtu > mtu)
8146 		return -EINVAL;
8147 
8148 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8149 
8150 	chan->imtu = mtu;
8151 
8152 	l2cap_ecred_reconfigure(chan);
8153 
8154 	return 0;
8155 }
8156 
8157 /* ---- L2CAP interface with lower layer (HCI) ---- */
8158 
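/* Incoming ACL connection indication: check whether any listening
 * channel would accept a connection from bdaddr and return the
 * link-mode flags to use, preferring channels bound to the local
 * address over wildcard listeners.
 */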
8159 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8160 {
8161 	int exact = 0, lm1 = 0, lm2 = 0;
8162 	struct l2cap_chan *c;
8163 
8164 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8165 
8166 	/* Find listening sockets and check their link_mode */
8167 	read_lock(&chan_list_lock);
8168 	list_for_each_entry(c, &chan_list, global_l) {
8169 		if (c->state != BT_LISTEN)
8170 			continue;
8171 
8172 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8173 			lm1 |= HCI_LM_ACCEPT;
8174 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8175 				lm1 |= HCI_LM_MASTER;
8176 			exact++;
8177 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8178 			lm2 |= HCI_LM_ACCEPT;
8179 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8180 				lm2 |= HCI_LM_MASTER;
8181 		}
8182 	}
8183 	read_unlock(&chan_list_lock);
8184 
8185 	return exact ? lm1 : lm2;
8186 }
8187 
8188 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
8189  * from an existing channel in the list or from the beginning of the
8190  * global list (by passing NULL as first parameter).
8191  */
8192 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8193 						  struct hci_conn *hcon)
8194 {
8195 	u8 src_type = bdaddr_src_type(hcon);
8196 
8197 	read_lock(&chan_list_lock);
8198 
8199 	if (c)
8200 		c = list_next_entry(c, global_l);
8201 	else
8202 		c = list_entry(chan_list.next, typeof(*c), global_l);
8203 
8204 	list_for_each_entry_from(c, &chan_list, global_l) {
8205 		if (c->chan_type != L2CAP_CHAN_FIXED)
8206 			continue;
8207 		if (c->state != BT_LISTEN)
8208 			continue;
8209 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8210 			continue;
8211 		if (src_type != c->src_type)
8212 			continue;
8213 
8214 		c = l2cap_chan_hold_unless_zero(c);
8215 		read_unlock(&chan_list_lock);
8216 		return c;
8217 	}
8218 
8219 	read_unlock(&chan_list_lock);
8220 
8221 	return NULL;
8222 }
8223 
8224 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8225 {
8226 	struct hci_dev *hdev = hcon->hdev;
8227 	struct l2cap_conn *conn;
8228 	struct l2cap_chan *pchan;
8229 	u8 dst_type;
8230 
8231 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8232 		return;
8233 
8234 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8235 
8236 	if (status) {
8237 		l2cap_conn_del(hcon, bt_to_errno(status));
8238 		return;
8239 	}
8240 
8241 	conn = l2cap_conn_add(hcon);
8242 	if (!conn)
8243 		return;
8244 
8245 	dst_type = bdaddr_dst_type(hcon);
8246 
8247 	/* If device is blocked, do not create channels for it */
8248 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8249 		return;
8250 
8251 	/* Find fixed channels and notify them of the new connection. We
8252 	 * use multiple individual lookups, continuing each time where
8253 	 * we left off, because the list lock would prevent calling the
8254 	 * potentially sleeping l2cap_chan_lock() function.
8255 	 */
8256 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8257 	while (pchan) {
8258 		struct l2cap_chan *chan, *next;
8259 
8260 		/* Client fixed channels should override server ones */
8261 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8262 			goto next;
8263 
8264 		l2cap_chan_lock(pchan);
8265 		chan = pchan->ops->new_connection(pchan);
8266 		if (chan) {
8267 			bacpy(&chan->src, &hcon->src);
8268 			bacpy(&chan->dst, &hcon->dst);
8269 			chan->src_type = bdaddr_src_type(hcon);
8270 			chan->dst_type = dst_type;
8271 
8272 			__l2cap_chan_add(conn, chan);
8273 		}
8274 
8275 		l2cap_chan_unlock(pchan);
8276 next:
8277 		next = l2cap_global_fixed_chan(pchan, hcon);
8278 		l2cap_chan_put(pchan);
8279 		pchan = next;
8280 	}
8281 
8282 	l2cap_conn_ready(conn);
8283 }
8284 
8285 int l2cap_disconn_ind(struct hci_conn *hcon)
8286 {
8287 	struct l2cap_conn *conn = hcon->l2cap_data;
8288 
8289 	BT_DBG("hcon %p", hcon);
8290 
8291 	if (!conn)
8292 		return HCI_ERROR_REMOTE_USER_TERM;
8293 	return conn->disc_reason;
8294 }
8295 
8296 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8297 {
8298 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8299 		return;
8300 
8301 	BT_DBG("hcon %p reason %d", hcon, reason);
8302 
8303 	l2cap_conn_del(hcon, bt_to_errno(reason));
8304 }
8305 
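/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, medium security channels are given a grace timer
 * while high/FIPS security channels are closed immediately; when
 * encryption is restored, the pending timer is cleared.
 */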
8306 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8307 {
8308 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8309 		return;
8310 
8311 	if (encrypt == 0x00) {
8312 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8313 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8314 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8315 			   chan->sec_level == BT_SECURITY_FIPS)
8316 			l2cap_chan_close(chan, ECONNREFUSED);
8317 	} else {
8318 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8319 			__clear_chan_timer(chan);
8320 	}
8321 }
8322 
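/* Security (authentication/encryption) change notification from HCI:
 * update each channel's security level and, depending on its state,
 * resume suspended traffic, restart connection establishment or answer
 * a pending incoming connection request.
 */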
8323 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8324 {
8325 	struct l2cap_conn *conn = hcon->l2cap_data;
8326 	struct l2cap_chan *chan;
8327 
8328 	if (!conn)
8329 		return;
8330 
8331 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8332 
8333 	mutex_lock(&conn->chan_lock);
8334 
8335 	list_for_each_entry(chan, &conn->chan_l, list) {
8336 		l2cap_chan_lock(chan);
8337 
8338 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8339 		       state_to_string(chan->state));
8340 
8341 		if (chan->scid == L2CAP_CID_A2MP) {
8342 			l2cap_chan_unlock(chan);
8343 			continue;
8344 		}
8345 
8346 		if (!status && encrypt)
8347 			chan->sec_level = hcon->sec_level;
8348 
8349 		if (!__l2cap_no_conn_pending(chan)) {
8350 			l2cap_chan_unlock(chan);
8351 			continue;
8352 		}
8353 
8354 		if (!status && (chan->state == BT_CONNECTED ||
8355 				chan->state == BT_CONFIG)) {
8356 			chan->ops->resume(chan);
8357 			l2cap_check_encryption(chan, encrypt);
8358 			l2cap_chan_unlock(chan);
8359 			continue;
8360 		}
8361 
8362 		if (chan->state == BT_CONNECT) {
8363 			if (!status && l2cap_check_enc_key_size(hcon))
8364 				l2cap_start_connection(chan);
8365 			else
8366 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8367 		} else if (chan->state == BT_CONNECT2 &&
8368 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8369 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8370 			struct l2cap_conn_rsp rsp;
8371 			__u16 res, stat;
8372 
8373 			if (!status && l2cap_check_enc_key_size(hcon)) {
8374 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8375 					res = L2CAP_CR_PEND;
8376 					stat = L2CAP_CS_AUTHOR_PEND;
8377 					chan->ops->defer(chan);
8378 				} else {
8379 					l2cap_state_change(chan, BT_CONFIG);
8380 					res = L2CAP_CR_SUCCESS;
8381 					stat = L2CAP_CS_NO_INFO;
8382 				}
8383 			} else {
8384 				l2cap_state_change(chan, BT_DISCONN);
8385 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8386 				res = L2CAP_CR_SEC_BLOCK;
8387 				stat = L2CAP_CS_NO_INFO;
8388 			}
8389 
8390 			rsp.scid   = cpu_to_le16(chan->dcid);
8391 			rsp.dcid   = cpu_to_le16(chan->scid);
8392 			rsp.result = cpu_to_le16(res);
8393 			rsp.status = cpu_to_le16(stat);
8394 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8395 				       sizeof(rsp), &rsp);
8396 
8397 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8398 			    res == L2CAP_CR_SUCCESS) {
8399 				char buf[128];
8400 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8401 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8402 					       L2CAP_CONF_REQ,
8403 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8404 					       buf);
8405 				chan->num_conf_req++;
8406 			}
8407 		}
8408 
8409 		l2cap_chan_unlock(chan);
8410 	}
8411 
8412 	mutex_unlock(&conn->chan_lock);
8413 }
8414 
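/* Reassemble L2CAP PDUs from incoming ACL fragments: a start fragment
 * supplies the basic header and total length, continuation fragments
 * are appended until the complete frame can be handed to
 * l2cap_recv_frame().
 */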
8415 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8416 {
8417 	struct l2cap_conn *conn = hcon->l2cap_data;
8418 	struct l2cap_hdr *hdr;
8419 	int len;
8420 
8421 	/* For an AMP controller, do not create an l2cap conn */
8422 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8423 		goto drop;
8424 
8425 	if (!conn)
8426 		conn = l2cap_conn_add(hcon);
8427 
8428 	if (!conn)
8429 		goto drop;
8430 
8431 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8432 
8433 	switch (flags) {
8434 	case ACL_START:
8435 	case ACL_START_NO_FLUSH:
8436 	case ACL_COMPLETE:
8437 		if (conn->rx_len) {
8438 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8439 			kfree_skb(conn->rx_skb);
8440 			conn->rx_skb = NULL;
8441 			conn->rx_len = 0;
8442 			l2cap_conn_unreliable(conn, ECOMM);
8443 		}
8444 
8445 		/* A start fragment always begins with the Basic L2CAP header */
8446 		if (skb->len < L2CAP_HDR_SIZE) {
8447 			BT_ERR("Frame is too short (len %d)", skb->len);
8448 			l2cap_conn_unreliable(conn, ECOMM);
8449 			goto drop;
8450 		}
8451 
8452 		hdr = (struct l2cap_hdr *) skb->data;
8453 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8454 
8455 		if (len == skb->len) {
8456 			/* Complete frame received */
8457 			l2cap_recv_frame(conn, skb);
8458 			return;
8459 		}
8460 
8461 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8462 
8463 		if (skb->len > len) {
8464 			BT_ERR("Frame is too long (len %d, expected len %d)",
8465 			       skb->len, len);
8466 			l2cap_conn_unreliable(conn, ECOMM);
8467 			goto drop;
8468 		}
8469 
8470 		/* Allocate skb for the complete frame (with header) */
8471 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8472 		if (!conn->rx_skb)
8473 			goto drop;
8474 
8475 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8476 					  skb->len);
8477 		conn->rx_len = len - skb->len;
8478 		break;
8479 
8480 	case ACL_CONT:
8481 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8482 
8483 		if (!conn->rx_len) {
8484 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8485 			l2cap_conn_unreliable(conn, ECOMM);
8486 			goto drop;
8487 		}
8488 
8489 		if (skb->len > conn->rx_len) {
8490 			BT_ERR("Fragment is too long (len %d, expected %d)",
8491 			       skb->len, conn->rx_len);
8492 			kfree_skb(conn->rx_skb);
8493 			conn->rx_skb = NULL;
8494 			conn->rx_len = 0;
8495 			l2cap_conn_unreliable(conn, ECOMM);
8496 			goto drop;
8497 		}
8498 
8499 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8500 					  skb->len);
8501 		conn->rx_len -= skb->len;
8502 
8503 		if (!conn->rx_len) {
8504 			/* Complete frame received. l2cap_recv_frame
8505 			 * takes ownership of the skb, so clear the
8506 			 * per-connection rx_skb pointer first.
8507 			 */
8508 			struct sk_buff *rx_skb = conn->rx_skb;
8509 			conn->rx_skb = NULL;
8510 			l2cap_recv_frame(conn, rx_skb);
8511 		}
8512 		break;
8513 	}
8514 
8515 drop:
8516 	kfree_skb(skb);
8517 }
8518 
8519 static struct hci_cb l2cap_cb = {
8520 	.name		= "L2CAP",
8521 	.connect_cfm	= l2cap_connect_cfm,
8522 	.disconn_cfm	= l2cap_disconn_cfm,
8523 	.security_cfm	= l2cap_security_cfm,
8524 };
8525 
8526 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8527 {
8528 	struct l2cap_chan *c;
8529 
8530 	read_lock(&chan_list_lock);
8531 
8532 	list_for_each_entry(c, &chan_list, global_l) {
8533 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8534 			   &c->src, c->src_type, &c->dst, c->dst_type,
8535 			   c->state, __le16_to_cpu(c->psm),
8536 			   c->scid, c->dcid, c->imtu, c->omtu,
8537 			   c->sec_level, c->mode);
8538 	}
8539 
8540 	read_unlock(&chan_list_lock);
8541 
8542 	return 0;
8543 }
8544 
8545 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8546 
8547 static struct dentry *l2cap_debugfs;
8548 
8549 int __init l2cap_init(void)
8550 {
8551 	int err;
8552 
8553 	err = l2cap_init_sockets();
8554 	if (err < 0)
8555 		return err;
8556 
8557 	hci_register_cb(&l2cap_cb);
8558 
8559 	if (IS_ERR_OR_NULL(bt_debugfs))
8560 		return 0;
8561 
8562 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8563 					    NULL, &l2cap_debugfs_fops);
8564 
8565 	return 0;
8566 }
8567 
8568 void l2cap_exit(void)
8569 {
8570 	debugfs_remove(l2cap_debugfs);
8571 	hci_unregister_cb(&l2cap_cb);
8572 	l2cap_cleanup_sockets();
8573 }
8574 
8575 module_param(disable_ertm, bool, 0644);
8576 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8577 
8578 module_param(enable_ecred, bool, 0644);
8579 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8580