1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
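/* Map an HCI link type and HCI address type to the BDADDR_* address
 * type exposed through the L2CAP socket API.
 */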
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
116 						 u16 cid)
117 {
118 	struct l2cap_chan *c;
119 
120 	mutex_lock(&conn->chan_lock);
121 	c = __l2cap_get_chan_by_scid(conn, cid);
122 	if (c)
123 		l2cap_chan_lock(c);
124 	mutex_unlock(&conn->chan_lock);
125 
126 	return c;
127 }
128 
129 /* Find channel with given DCID.
130  * Returns locked channel.
131  */
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
133 						 u16 cid)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_dcid(conn, cid);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
147 						    u8 ident)
148 {
149 	struct l2cap_chan *c;
150 
151 	list_for_each_entry(c, &conn->chan_l, list) {
152 		if (c->ident == ident)
153 			return c;
154 	}
155 	return NULL;
156 }
157 
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						  u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	mutex_lock(&conn->chan_lock);
164 	c = __l2cap_get_chan_by_ident(conn, ident);
165 	if (c)
166 		l2cap_chan_lock(c);
167 	mutex_unlock(&conn->chan_lock);
168 
169 	return c;
170 }
171 
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
173 						      u8 src_type)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
179 			continue;
180 
181 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
182 			continue;
183 
184 		if (c->sport == psm && !bacmp(&c->src, src))
185 			return c;
186 	}
187 	return NULL;
188 }
189 
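/* Bind a PSM to the channel. If @psm is 0, search the dynamic range for a
 * free PSM on this source address: odd values stepped by 2 for BR/EDR,
 * every value in the LE dynamic range otherwise. Returns -EADDRINUSE if
 * the requested PSM is already bound and -EINVAL if no dynamic PSM is free.
 */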
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 {
192 	int err;
193 
194 	write_lock(&chan_list_lock);
195 
196 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
197 		err = -EADDRINUSE;
198 		goto done;
199 	}
200 
201 	if (psm) {
202 		chan->psm = psm;
203 		chan->sport = psm;
204 		err = 0;
205 	} else {
206 		u16 p, start, end, incr;
207 
208 		if (chan->src_type == BDADDR_BREDR) {
209 			start = L2CAP_PSM_DYN_START;
210 			end = L2CAP_PSM_AUTO_END;
211 			incr = 2;
212 		} else {
213 			start = L2CAP_PSM_LE_DYN_START;
214 			end = L2CAP_PSM_LE_DYN_END;
215 			incr = 1;
216 		}
217 
218 		err = -EINVAL;
219 		for (p = start; p <= end; p += incr)
220 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 							 chan->src_type)) {
222 				chan->psm   = cpu_to_le16(p);
223 				chan->sport = cpu_to_le16(p);
224 				err = 0;
225 				break;
226 			}
227 	}
228 
229 done:
230 	write_unlock(&chan_list_lock);
231 	return err;
232 }
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
234 
235 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
236 {
237 	write_lock(&chan_list_lock);
238 
239 	/* Override the defaults (which are for conn-oriented) */
240 	chan->omtu = L2CAP_DEFAULT_MTU;
241 	chan->chan_type = L2CAP_CHAN_FIXED;
242 
243 	chan->scid = scid;
244 
245 	write_unlock(&chan_list_lock);
246 
247 	return 0;
248 }
249 
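/* Pick the first unused dynamic source CID on this connection, bounded by
 * the LE or BR/EDR dynamic range. Returns 0 if every dynamic CID is in use.
 */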
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 {
252 	u16 cid, dyn_end;
253 
254 	if (conn->hcon->type == LE_LINK)
255 		dyn_end = L2CAP_CID_LE_DYN_END;
256 	else
257 		dyn_end = L2CAP_CID_DYN_END;
258 
259 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 		if (!__l2cap_get_chan_by_scid(conn, cid))
261 			return cid;
262 	}
263 
264 	return 0;
265 }
266 
267 static void l2cap_state_change(struct l2cap_chan *chan, int state)
268 {
269 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
270 	       state_to_string(state));
271 
272 	chan->state = state;
273 	chan->ops->state_change(chan, state, 0);
274 }
275 
276 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
277 						int state, int err)
278 {
279 	chan->state = state;
280 	chan->ops->state_change(chan, chan->state, err);
281 }
282 
283 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
284 {
285 	chan->ops->state_change(chan, chan->state, err);
286 }
287 
288 static void __set_retrans_timer(struct l2cap_chan *chan)
289 {
290 	if (!delayed_work_pending(&chan->monitor_timer) &&
291 	    chan->retrans_timeout) {
292 		l2cap_set_timer(chan, &chan->retrans_timer,
293 				msecs_to_jiffies(chan->retrans_timeout));
294 	}
295 }
296 
297 static void __set_monitor_timer(struct l2cap_chan *chan)
298 {
299 	__clear_retrans_timer(chan);
300 	if (chan->monitor_timeout) {
301 		l2cap_set_timer(chan, &chan->monitor_timer,
302 				msecs_to_jiffies(chan->monitor_timeout));
303 	}
304 }
305 
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 					       u16 seq)
308 {
309 	struct sk_buff *skb;
310 
311 	skb_queue_walk(head, skb) {
312 		if (bt_cb(skb)->l2cap.txseq == seq)
313 			return skb;
314 	}
315 
316 	return NULL;
317 }
318 
319 /* ---- L2CAP sequence number lists ---- */
320 
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322  * SREJ requests that are received and for frames that are to be
323  * retransmitted. These seq_list functions implement a singly-linked
324  * list in an array, where membership in the list can also be checked
325  * in constant time. Items can also be added to the tail of the list
326  * and removed from the head in constant time, without further memory
327  * allocs or frees.
328  */
329 
330 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
331 {
332 	size_t alloc_size, i;
333 
334 	/* Allocated size is a power of 2 to map sequence numbers
335 	 * (which may be up to 14 bits) into a smaller array that is
336 	 * sized for the negotiated ERTM transmit windows.
337 	 */
338 	alloc_size = roundup_pow_of_two(size);
339 
340 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
341 	if (!seq_list->list)
342 		return -ENOMEM;
343 
344 	seq_list->mask = alloc_size - 1;
345 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
347 	for (i = 0; i < alloc_size; i++)
348 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
349 
350 	return 0;
351 }
352 
353 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
354 {
355 	kfree(seq_list->list);
356 }
357 
358 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
359 					   u16 seq)
360 {
361 	/* Constant-time check for list membership */
362 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
363 }
364 
365 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
366 {
367 	u16 seq = seq_list->head;
368 	u16 mask = seq_list->mask;
369 
370 	seq_list->head = seq_list->list[seq & mask];
371 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
372 
373 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
374 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 	}
377 
378 	return seq;
379 }
380 
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
382 {
383 	u16 i;
384 
385 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
386 		return;
387 
388 	for (i = 0; i <= seq_list->mask; i++)
389 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390 
391 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
393 }
394 
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
396 {
397 	u16 mask = seq_list->mask;
398 
399 	/* All appends happen in constant time */
400 
401 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 		seq_list->head = seq;
406 	else
407 		seq_list->list[seq_list->tail & mask] = seq;
408 
409 	seq_list->tail = seq;
410 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
411 }
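
/* Illustrative walk-through (not part of the original source), assuming sl is
 * an initialized, empty struct l2cap_seq_list:
 *
 *	l2cap_seq_list_append(&sl, 3);
 *	l2cap_seq_list_append(&sl, 7);
 *
 * leaves sl.head == 3, sl.tail == 7, sl.list[3] == 7 and
 * sl.list[7] == L2CAP_SEQ_LIST_TAIL, so l2cap_seq_list_contains(&sl, 7) is
 * true and l2cap_seq_list_pop(&sl) returns 3, each in constant time.
 */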
412 
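/* Delayed work run when the channel timer armed by __set_chan_timer() fires:
 * closes the channel with ECONNREFUSED or ETIMEDOUT depending on its state,
 * then drops the reference that was taken when the timer was scheduled.
 */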
413 static void l2cap_chan_timeout(struct work_struct *work)
414 {
415 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
416 					       chan_timer.work);
417 	struct l2cap_conn *conn = chan->conn;
418 	int reason;
419 
420 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
421 
422 	mutex_lock(&conn->chan_lock);
423 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
424 	 * this work. No need to call l2cap_chan_hold(chan) here again.
425 	 */
426 	l2cap_chan_lock(chan);
427 
428 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
429 		reason = ECONNREFUSED;
430 	else if (chan->state == BT_CONNECT &&
431 		 chan->sec_level != BT_SECURITY_SDP)
432 		reason = ECONNREFUSED;
433 	else
434 		reason = ETIMEDOUT;
435 
436 	l2cap_chan_close(chan, reason);
437 
438 	chan->ops->close(chan);
439 
440 	l2cap_chan_unlock(chan);
441 	l2cap_chan_put(chan);
442 
443 	mutex_unlock(&conn->chan_lock);
444 }
445 
446 struct l2cap_chan *l2cap_chan_create(void)
447 {
448 	struct l2cap_chan *chan;
449 
450 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
451 	if (!chan)
452 		return NULL;
453 
454 	skb_queue_head_init(&chan->tx_q);
455 	skb_queue_head_init(&chan->srej_q);
456 	mutex_init(&chan->lock);
457 
458 	/* Set default lock nesting level */
459 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
460 
461 	write_lock(&chan_list_lock);
462 	list_add(&chan->global_l, &chan_list);
463 	write_unlock(&chan_list_lock);
464 
465 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
466 
467 	chan->state = BT_OPEN;
468 
469 	kref_init(&chan->kref);
470 
471 	/* This flag is cleared in l2cap_chan_ready() */
472 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
473 
474 	BT_DBG("chan %p", chan);
475 
476 	return chan;
477 }
478 EXPORT_SYMBOL_GPL(l2cap_chan_create);
479 
480 static void l2cap_chan_destroy(struct kref *kref)
481 {
482 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
483 
484 	BT_DBG("chan %p", chan);
485 
486 	write_lock(&chan_list_lock);
487 	list_del(&chan->global_l);
488 	write_unlock(&chan_list_lock);
489 
490 	kfree(chan);
491 }
492 
493 void l2cap_chan_hold(struct l2cap_chan *c)
494 {
495 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
496 
497 	kref_get(&c->kref);
498 }
499 
500 void l2cap_chan_put(struct l2cap_chan *c)
501 {
502 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
503 
504 	kref_put(&c->kref, l2cap_chan_destroy);
505 }
506 EXPORT_SYMBOL_GPL(l2cap_chan_put);
507 
508 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
509 {
510 	chan->fcs  = L2CAP_FCS_CRC16;
511 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
512 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
513 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
514 	chan->remote_max_tx = chan->max_tx;
515 	chan->remote_tx_win = chan->tx_win;
516 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
517 	chan->sec_level = BT_SECURITY_LOW;
518 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
519 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
520 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
521 
522 	chan->conf_state = 0;
523 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
524 
525 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
526 }
527 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
528 
529 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
530 {
531 	chan->sdu = NULL;
532 	chan->sdu_last_frag = NULL;
533 	chan->sdu_len = 0;
534 	chan->tx_credits = tx_credits;
535 	/* Derive MPS from connection MTU to stop HCI fragmentation */
536 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
537 	/* Give enough credits for a full packet */
538 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
539 
540 	skb_queue_head_init(&chan->tx_q);
541 }
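
/* Illustrative arithmetic (values not from the original source): with an
 * imtu of 672 and a resulting mps of 247, rx_credits = 672 / 247 + 1 = 3,
 * enough for the peer to send one maximum-sized SDU without waiting for
 * more credits.
 */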
542 
543 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
544 {
545 	l2cap_le_flowctl_init(chan, tx_credits);
546 
547 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
548 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
549 		chan->mps = L2CAP_ECRED_MIN_MPS;
550 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
551 	}
552 }
553 
554 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 {
556 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
557 	       __le16_to_cpu(chan->psm), chan->dcid);
558 
559 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
560 
561 	chan->conn = conn;
562 
563 	switch (chan->chan_type) {
564 	case L2CAP_CHAN_CONN_ORIENTED:
565 		/* Alloc CID for connection-oriented socket */
566 		chan->scid = l2cap_alloc_cid(conn);
567 		if (conn->hcon->type == ACL_LINK)
568 			chan->omtu = L2CAP_DEFAULT_MTU;
569 		break;
570 
571 	case L2CAP_CHAN_CONN_LESS:
572 		/* Connectionless socket */
573 		chan->scid = L2CAP_CID_CONN_LESS;
574 		chan->dcid = L2CAP_CID_CONN_LESS;
575 		chan->omtu = L2CAP_DEFAULT_MTU;
576 		break;
577 
578 	case L2CAP_CHAN_FIXED:
579 		/* Caller will set CID and CID specific MTU values */
580 		break;
581 
582 	default:
583 		/* Raw socket can send/recv signalling messages only */
584 		chan->scid = L2CAP_CID_SIGNALING;
585 		chan->dcid = L2CAP_CID_SIGNALING;
586 		chan->omtu = L2CAP_DEFAULT_MTU;
587 	}
588 
589 	chan->local_id		= L2CAP_BESTEFFORT_ID;
590 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
591 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
592 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
593 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
594 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
595 
596 	l2cap_chan_hold(chan);
597 
598 	/* Only keep a reference for fixed channels if they requested it */
599 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
600 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
601 		hci_conn_hold(conn->hcon);
602 
603 	list_add(&chan->list, &conn->chan_l);
604 }
605 
606 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
607 {
608 	mutex_lock(&conn->chan_lock);
609 	__l2cap_chan_add(conn, chan);
610 	mutex_unlock(&conn->chan_lock);
611 }
612 
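/* Detach a channel from its connection: tear it down towards the owner,
 * unlink it from the connection's channel list, drop the hci_conn reference
 * if one was held, and purge any mode-specific queues and timers unless
 * configuration never completed.
 */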
613 void l2cap_chan_del(struct l2cap_chan *chan, int err)
614 {
615 	struct l2cap_conn *conn = chan->conn;
616 
617 	__clear_chan_timer(chan);
618 
619 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
620 	       state_to_string(chan->state));
621 
622 	chan->ops->teardown(chan, err);
623 
624 	if (conn) {
625 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
626 		/* Delete from channel list */
627 		list_del(&chan->list);
628 
629 		l2cap_chan_put(chan);
630 
631 		chan->conn = NULL;
632 
633 		/* Reference was only held for non-fixed channels or
634 		 * fixed channels that explicitly requested it using the
635 		 * FLAG_HOLD_HCI_CONN flag.
636 		 */
637 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
638 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
639 			hci_conn_drop(conn->hcon);
640 
641 		if (mgr && mgr->bredr_chan == chan)
642 			mgr->bredr_chan = NULL;
643 	}
644 
645 	if (chan->hs_hchan) {
646 		struct hci_chan *hs_hchan = chan->hs_hchan;
647 
648 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
649 		amp_disconnect_logical_link(hs_hchan);
650 	}
651 
652 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
653 		return;
654 
655 	switch (chan->mode) {
656 	case L2CAP_MODE_BASIC:
657 		break;
658 
659 	case L2CAP_MODE_LE_FLOWCTL:
660 	case L2CAP_MODE_EXT_FLOWCTL:
661 		skb_queue_purge(&chan->tx_q);
662 		break;
663 
664 	case L2CAP_MODE_ERTM:
665 		__clear_retrans_timer(chan);
666 		__clear_monitor_timer(chan);
667 		__clear_ack_timer(chan);
668 
669 		skb_queue_purge(&chan->srej_q);
670 
671 		l2cap_seq_list_free(&chan->srej_list);
672 		l2cap_seq_list_free(&chan->retrans_list);
673 		fallthrough;
674 
675 	case L2CAP_MODE_STREAMING:
676 		skb_queue_purge(&chan->tx_q);
677 		break;
678 	}
679 
680 	return;
681 }
682 EXPORT_SYMBOL_GPL(l2cap_chan_del);
683 
684 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
685 			      void *data)
686 {
687 	struct l2cap_chan *chan;
688 
689 	list_for_each_entry(chan, &conn->chan_l, list) {
690 		func(chan, data);
691 	}
692 }
693 
694 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
695 		     void *data)
696 {
697 	if (!conn)
698 		return;
699 
700 	mutex_lock(&conn->chan_lock);
701 	__l2cap_chan_list(conn, func, data);
702 	mutex_unlock(&conn->chan_lock);
703 }
704 
705 EXPORT_SYMBOL_GPL(l2cap_chan_list);
706 
707 static void l2cap_conn_update_id_addr(struct work_struct *work)
708 {
709 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
710 					       id_addr_update_work);
711 	struct hci_conn *hcon = conn->hcon;
712 	struct l2cap_chan *chan;
713 
714 	mutex_lock(&conn->chan_lock);
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		l2cap_chan_lock(chan);
718 		bacpy(&chan->dst, &hcon->dst);
719 		chan->dst_type = bdaddr_dst_type(hcon);
720 		l2cap_chan_unlock(chan);
721 	}
722 
723 	mutex_unlock(&conn->chan_lock);
724 }
725 
726 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
727 {
728 	struct l2cap_conn *conn = chan->conn;
729 	struct l2cap_le_conn_rsp rsp;
730 	u16 result;
731 
732 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
733 		result = L2CAP_CR_LE_AUTHORIZATION;
734 	else
735 		result = L2CAP_CR_LE_BAD_PSM;
736 
737 	l2cap_state_change(chan, BT_DISCONN);
738 
739 	rsp.dcid    = cpu_to_le16(chan->scid);
740 	rsp.mtu     = cpu_to_le16(chan->imtu);
741 	rsp.mps     = cpu_to_le16(chan->mps);
742 	rsp.credits = cpu_to_le16(chan->rx_credits);
743 	rsp.result  = cpu_to_le16(result);
744 
745 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
746 		       &rsp);
747 }
748 
749 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
750 {
751 	struct l2cap_conn *conn = chan->conn;
752 	struct l2cap_ecred_conn_rsp rsp;
753 	u16 result;
754 
755 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
756 		result = L2CAP_CR_LE_AUTHORIZATION;
757 	else
758 		result = L2CAP_CR_LE_BAD_PSM;
759 
760 	l2cap_state_change(chan, BT_DISCONN);
761 
762 	memset(&rsp, 0, sizeof(rsp));
763 
764 	rsp.result  = cpu_to_le16(result);
765 
766 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
767 		       &rsp);
768 }
769 
770 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
771 {
772 	struct l2cap_conn *conn = chan->conn;
773 	struct l2cap_conn_rsp rsp;
774 	u16 result;
775 
776 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 		result = L2CAP_CR_SEC_BLOCK;
778 	else
779 		result = L2CAP_CR_BAD_PSM;
780 
781 	l2cap_state_change(chan, BT_DISCONN);
782 
783 	rsp.scid   = cpu_to_le16(chan->dcid);
784 	rsp.dcid   = cpu_to_le16(chan->scid);
785 	rsp.result = cpu_to_le16(result);
786 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
787 
788 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
789 }
790 
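/* Close a channel according to its current state: send a disconnect request
 * for connected or configuring connection-oriented channels, reject a still
 * pending incoming connection in BT_CONNECT2, and otherwise simply tear the
 * channel down.
 */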
791 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
792 {
793 	struct l2cap_conn *conn = chan->conn;
794 
795 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
796 
797 	switch (chan->state) {
798 	case BT_LISTEN:
799 		chan->ops->teardown(chan, 0);
800 		break;
801 
802 	case BT_CONNECTED:
803 	case BT_CONFIG:
804 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
805 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
806 			l2cap_send_disconn_req(chan, reason);
807 		} else
808 			l2cap_chan_del(chan, reason);
809 		break;
810 
811 	case BT_CONNECT2:
812 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
813 			if (conn->hcon->type == ACL_LINK)
814 				l2cap_chan_connect_reject(chan);
815 			else if (conn->hcon->type == LE_LINK) {
816 				switch (chan->mode) {
817 				case L2CAP_MODE_LE_FLOWCTL:
818 					l2cap_chan_le_connect_reject(chan);
819 					break;
820 				case L2CAP_MODE_EXT_FLOWCTL:
821 					l2cap_chan_ecred_connect_reject(chan);
822 					break;
823 				}
824 			}
825 		}
826 
827 		l2cap_chan_del(chan, reason);
828 		break;
829 
830 	case BT_CONNECT:
831 	case BT_DISCONN:
832 		l2cap_chan_del(chan, reason);
833 		break;
834 
835 	default:
836 		chan->ops->teardown(chan, 0);
837 		break;
838 	}
839 }
840 EXPORT_SYMBOL(l2cap_chan_close);
841 
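/* Translate the channel type and requested security level into an HCI
 * authentication requirement, upgrading BT_SECURITY_LOW to BT_SECURITY_SDP
 * for the SDP and 3DSP PSMs along the way.
 */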
842 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
843 {
844 	switch (chan->chan_type) {
845 	case L2CAP_CHAN_RAW:
846 		switch (chan->sec_level) {
847 		case BT_SECURITY_HIGH:
848 		case BT_SECURITY_FIPS:
849 			return HCI_AT_DEDICATED_BONDING_MITM;
850 		case BT_SECURITY_MEDIUM:
851 			return HCI_AT_DEDICATED_BONDING;
852 		default:
853 			return HCI_AT_NO_BONDING;
854 		}
855 		break;
856 	case L2CAP_CHAN_CONN_LESS:
857 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
858 			if (chan->sec_level == BT_SECURITY_LOW)
859 				chan->sec_level = BT_SECURITY_SDP;
860 		}
861 		if (chan->sec_level == BT_SECURITY_HIGH ||
862 		    chan->sec_level == BT_SECURITY_FIPS)
863 			return HCI_AT_NO_BONDING_MITM;
864 		else
865 			return HCI_AT_NO_BONDING;
866 		break;
867 	case L2CAP_CHAN_CONN_ORIENTED:
868 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
869 			if (chan->sec_level == BT_SECURITY_LOW)
870 				chan->sec_level = BT_SECURITY_SDP;
871 
872 			if (chan->sec_level == BT_SECURITY_HIGH ||
873 			    chan->sec_level == BT_SECURITY_FIPS)
874 				return HCI_AT_NO_BONDING_MITM;
875 			else
876 				return HCI_AT_NO_BONDING;
877 		}
878 		fallthrough;
879 
880 	default:
881 		switch (chan->sec_level) {
882 		case BT_SECURITY_HIGH:
883 		case BT_SECURITY_FIPS:
884 			return HCI_AT_GENERAL_BONDING_MITM;
885 		case BT_SECURITY_MEDIUM:
886 			return HCI_AT_GENERAL_BONDING;
887 		default:
888 			return HCI_AT_NO_BONDING;
889 		}
890 		break;
891 	}
892 }
893 
894 /* Service level security */
895 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
896 {
897 	struct l2cap_conn *conn = chan->conn;
898 	__u8 auth_type;
899 
900 	if (conn->hcon->type == LE_LINK)
901 		return smp_conn_security(conn->hcon, chan->sec_level);
902 
903 	auth_type = l2cap_get_auth_type(chan);
904 
905 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
906 				 initiator);
907 }
908 
909 static u8 l2cap_get_ident(struct l2cap_conn *conn)
910 {
911 	u8 id;
912 
913 	/* Get next available identifier.
914 	 *    1 - 128 are used by kernel.
915 	 *  129 - 199 are reserved.
916 	 *  200 - 254 are used by utilities like l2ping, etc.
917 	 */
918 
919 	mutex_lock(&conn->ident_lock);
920 
921 	if (++conn->tx_ident > 128)
922 		conn->tx_ident = 1;
923 
924 	id = conn->tx_ident;
925 
926 	mutex_unlock(&conn->ident_lock);
927 
928 	return id;
929 }
930 
931 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
932 			   void *data)
933 {
934 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
935 	u8 flags;
936 
937 	BT_DBG("code 0x%2.2x", code);
938 
939 	if (!skb)
940 		return;
941 
942 	/* Use NO_FLUSH if supported, or if this is an LE link (which does
943 	 * not support auto-flushing packets) */
944 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
945 	    conn->hcon->type == LE_LINK)
946 		flags = ACL_START_NO_FLUSH;
947 	else
948 		flags = ACL_START;
949 
950 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
951 	skb->priority = HCI_PRIO_MAX;
952 
953 	hci_send_acl(conn->hchan, skb, flags);
954 }
955 
956 static bool __chan_is_moving(struct l2cap_chan *chan)
957 {
958 	return chan->move_state != L2CAP_MOVE_STABLE &&
959 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
960 }
961 
962 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
963 {
964 	struct hci_conn *hcon = chan->conn->hcon;
965 	u16 flags;
966 
967 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
968 	       skb->priority);
969 
970 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
971 		if (chan->hs_hchan)
972 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
973 		else
974 			kfree_skb(skb);
975 
976 		return;
977 	}
978 
979 	/* Use NO_FLUSH for LE links (where this is the only option) or
980 	 * if the BR/EDR link supports it and flushing has not been
981 	 * explicitly requested (through FLAG_FLUSHABLE).
982 	 */
983 	if (hcon->type == LE_LINK ||
984 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
985 	     lmp_no_flush_capable(hcon->hdev)))
986 		flags = ACL_START_NO_FLUSH;
987 	else
988 		flags = ACL_START;
989 
990 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
991 	hci_send_acl(chan->conn->hchan, skb, flags);
992 }
993 
994 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
995 {
996 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
997 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
998 
999 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1000 		/* S-Frame */
1001 		control->sframe = 1;
1002 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1003 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1004 
1005 		control->sar = 0;
1006 		control->txseq = 0;
1007 	} else {
1008 		/* I-Frame */
1009 		control->sframe = 0;
1010 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1011 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1012 
1013 		control->poll = 0;
1014 		control->super = 0;
1015 	}
1016 }
1017 
1018 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1019 {
1020 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1021 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1022 
1023 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1024 		/* S-Frame */
1025 		control->sframe = 1;
1026 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1027 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1028 
1029 		control->sar = 0;
1030 		control->txseq = 0;
1031 	} else {
1032 		/* I-Frame */
1033 		control->sframe = 0;
1034 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1035 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1036 
1037 		control->poll = 0;
1038 		control->super = 0;
1039 	}
1040 }
1041 
1042 static inline void __unpack_control(struct l2cap_chan *chan,
1043 				    struct sk_buff *skb)
1044 {
1045 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1046 		__unpack_extended_control(get_unaligned_le32(skb->data),
1047 					  &bt_cb(skb)->l2cap);
1048 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1049 	} else {
1050 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1051 					  &bt_cb(skb)->l2cap);
1052 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1053 	}
1054 }
1055 
1056 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1057 {
1058 	u32 packed;
1059 
1060 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1061 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1062 
1063 	if (control->sframe) {
1064 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1065 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1066 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1067 	} else {
1068 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1069 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1070 	}
1071 
1072 	return packed;
1073 }
1074 
1075 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1076 {
1077 	u16 packed;
1078 
1079 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1080 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1081 
1082 	if (control->sframe) {
1083 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1084 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1085 		packed |= L2CAP_CTRL_FRAME_TYPE;
1086 	} else {
1087 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1088 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1089 	}
1090 
1091 	return packed;
1092 }
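
/* Illustrative sketch (not part of the original source), following the
 * Enhanced control field layout from the L2CAP specification: an RR S-frame
 * acknowledging reqseq 5 with the Final bit set packs to
 *
 *	(5 << L2CAP_CTRL_REQSEQ_SHIFT) | (1 << L2CAP_CTRL_FINAL_SHIFT) |
 *	L2CAP_CTRL_FRAME_TYPE
 *
 * (the RR supervisory code is 0, so the super field contributes nothing),
 * which is 0x0581, sent on the wire as the little-endian bytes 0x81 0x05.
 */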
1093 
1094 static inline void __pack_control(struct l2cap_chan *chan,
1095 				  struct l2cap_ctrl *control,
1096 				  struct sk_buff *skb)
1097 {
1098 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1099 		put_unaligned_le32(__pack_extended_control(control),
1100 				   skb->data + L2CAP_HDR_SIZE);
1101 	} else {
1102 		put_unaligned_le16(__pack_enhanced_control(control),
1103 				   skb->data + L2CAP_HDR_SIZE);
1104 	}
1105 }
1106 
1107 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1108 {
1109 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1110 		return L2CAP_EXT_HDR_SIZE;
1111 	else
1112 		return L2CAP_ENH_HDR_SIZE;
1113 }
1114 
1115 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1116 					       u32 control)
1117 {
1118 	struct sk_buff *skb;
1119 	struct l2cap_hdr *lh;
1120 	int hlen = __ertm_hdr_size(chan);
1121 
1122 	if (chan->fcs == L2CAP_FCS_CRC16)
1123 		hlen += L2CAP_FCS_SIZE;
1124 
1125 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1126 
1127 	if (!skb)
1128 		return ERR_PTR(-ENOMEM);
1129 
1130 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1131 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1132 	lh->cid = cpu_to_le16(chan->dcid);
1133 
1134 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1135 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1136 	else
1137 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1138 
1139 	if (chan->fcs == L2CAP_FCS_CRC16) {
1140 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1141 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1142 	}
1143 
1144 	skb->priority = HCI_PRIO_MAX;
1145 	return skb;
1146 }
1147 
1148 static void l2cap_send_sframe(struct l2cap_chan *chan,
1149 			      struct l2cap_ctrl *control)
1150 {
1151 	struct sk_buff *skb;
1152 	u32 control_field;
1153 
1154 	BT_DBG("chan %p, control %p", chan, control);
1155 
1156 	if (!control->sframe)
1157 		return;
1158 
1159 	if (__chan_is_moving(chan))
1160 		return;
1161 
1162 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1163 	    !control->poll)
1164 		control->final = 1;
1165 
1166 	if (control->super == L2CAP_SUPER_RR)
1167 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1168 	else if (control->super == L2CAP_SUPER_RNR)
1169 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1170 
1171 	if (control->super != L2CAP_SUPER_SREJ) {
1172 		chan->last_acked_seq = control->reqseq;
1173 		__clear_ack_timer(chan);
1174 	}
1175 
1176 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1177 	       control->final, control->poll, control->super);
1178 
1179 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1180 		control_field = __pack_extended_control(control);
1181 	else
1182 		control_field = __pack_enhanced_control(control);
1183 
1184 	skb = l2cap_create_sframe_pdu(chan, control_field);
1185 	if (!IS_ERR(skb))
1186 		l2cap_do_send(chan, skb);
1187 }
1188 
1189 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1190 {
1191 	struct l2cap_ctrl control;
1192 
1193 	BT_DBG("chan %p, poll %d", chan, poll);
1194 
1195 	memset(&control, 0, sizeof(control));
1196 	control.sframe = 1;
1197 	control.poll = poll;
1198 
1199 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1200 		control.super = L2CAP_SUPER_RNR;
1201 	else
1202 		control.super = L2CAP_SUPER_RR;
1203 
1204 	control.reqseq = chan->buffer_seq;
1205 	l2cap_send_sframe(chan, &control);
1206 }
1207 
1208 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1209 {
1210 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1211 		return true;
1212 
1213 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1214 }
1215 
1216 static bool __amp_capable(struct l2cap_chan *chan)
1217 {
1218 	struct l2cap_conn *conn = chan->conn;
1219 	struct hci_dev *hdev;
1220 	bool amp_available = false;
1221 
1222 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1223 		return false;
1224 
1225 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1226 		return false;
1227 
1228 	read_lock(&hci_dev_list_lock);
1229 	list_for_each_entry(hdev, &hci_dev_list, list) {
1230 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1231 		    test_bit(HCI_UP, &hdev->flags)) {
1232 			amp_available = true;
1233 			break;
1234 		}
1235 	}
1236 	read_unlock(&hci_dev_list_lock);
1237 
1238 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1239 		return amp_available;
1240 
1241 	return false;
1242 }
1243 
1244 static bool l2cap_check_efs(struct l2cap_chan *chan)
1245 {
1246 	/* Check EFS parameters */
1247 	return true;
1248 }
1249 
1250 void l2cap_send_conn_req(struct l2cap_chan *chan)
1251 {
1252 	struct l2cap_conn *conn = chan->conn;
1253 	struct l2cap_conn_req req;
1254 
1255 	req.scid = cpu_to_le16(chan->scid);
1256 	req.psm  = chan->psm;
1257 
1258 	chan->ident = l2cap_get_ident(conn);
1259 
1260 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1261 
1262 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1263 }
1264 
1265 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1266 {
1267 	struct l2cap_create_chan_req req;
1268 	req.scid = cpu_to_le16(chan->scid);
1269 	req.psm  = chan->psm;
1270 	req.amp_id = amp_id;
1271 
1272 	chan->ident = l2cap_get_ident(chan->conn);
1273 
1274 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1275 		       sizeof(req), &req);
1276 }
1277 
1278 static void l2cap_move_setup(struct l2cap_chan *chan)
1279 {
1280 	struct sk_buff *skb;
1281 
1282 	BT_DBG("chan %p", chan);
1283 
1284 	if (chan->mode != L2CAP_MODE_ERTM)
1285 		return;
1286 
1287 	__clear_retrans_timer(chan);
1288 	__clear_monitor_timer(chan);
1289 	__clear_ack_timer(chan);
1290 
1291 	chan->retry_count = 0;
1292 	skb_queue_walk(&chan->tx_q, skb) {
1293 		if (bt_cb(skb)->l2cap.retries)
1294 			bt_cb(skb)->l2cap.retries = 1;
1295 		else
1296 			break;
1297 	}
1298 
1299 	chan->expected_tx_seq = chan->buffer_seq;
1300 
1301 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1302 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1303 	l2cap_seq_list_clear(&chan->retrans_list);
1304 	l2cap_seq_list_clear(&chan->srej_list);
1305 	skb_queue_purge(&chan->srej_q);
1306 
1307 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1308 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1309 
1310 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1311 }
1312 
1313 static void l2cap_move_done(struct l2cap_chan *chan)
1314 {
1315 	u8 move_role = chan->move_role;
1316 	BT_DBG("chan %p", chan);
1317 
1318 	chan->move_state = L2CAP_MOVE_STABLE;
1319 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1320 
1321 	if (chan->mode != L2CAP_MODE_ERTM)
1322 		return;
1323 
1324 	switch (move_role) {
1325 	case L2CAP_MOVE_ROLE_INITIATOR:
1326 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1327 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1328 		break;
1329 	case L2CAP_MOVE_ROLE_RESPONDER:
1330 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1331 		break;
1332 	}
1333 }
1334 
1335 static void l2cap_chan_ready(struct l2cap_chan *chan)
1336 {
1337 	/* The channel may have already been flagged as connected in
1338 	 * case of receiving data before the L2CAP info req/rsp
1339 	 * procedure is complete.
1340 	 */
1341 	if (chan->state == BT_CONNECTED)
1342 		return;
1343 
1344 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1345 	chan->conf_state = 0;
1346 	__clear_chan_timer(chan);
1347 
1348 	switch (chan->mode) {
1349 	case L2CAP_MODE_LE_FLOWCTL:
1350 	case L2CAP_MODE_EXT_FLOWCTL:
1351 		if (!chan->tx_credits)
1352 			chan->ops->suspend(chan);
1353 		break;
1354 	}
1355 
1356 	chan->state = BT_CONNECTED;
1357 
1358 	chan->ops->ready(chan);
1359 }
1360 
1361 static void l2cap_le_connect(struct l2cap_chan *chan)
1362 {
1363 	struct l2cap_conn *conn = chan->conn;
1364 	struct l2cap_le_conn_req req;
1365 
1366 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1367 		return;
1368 
1369 	if (!chan->imtu)
1370 		chan->imtu = chan->conn->mtu;
1371 
1372 	l2cap_le_flowctl_init(chan, 0);
1373 
1374 	req.psm     = chan->psm;
1375 	req.scid    = cpu_to_le16(chan->scid);
1376 	req.mtu     = cpu_to_le16(chan->imtu);
1377 	req.mps     = cpu_to_le16(chan->mps);
1378 	req.credits = cpu_to_le16(chan->rx_credits);
1379 
1380 	chan->ident = l2cap_get_ident(conn);
1381 
1382 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1383 		       sizeof(req), &req);
1384 }
1385 
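/* Scratch state used to build an Enhanced Credit Based connection request:
 * room for up to five SCIDs, the initiating channel, its owner's PID and a
 * running count of the channels collected by l2cap_ecred_defer_connect().
 */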
1386 struct l2cap_ecred_conn_data {
1387 	struct {
1388 		struct l2cap_ecred_conn_req req;
1389 		__le16 scid[5];
1390 	} __packed pdu;
1391 	struct l2cap_chan *chan;
1392 	struct pid *pid;
1393 	int count;
1394 };
1395 
1396 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1397 {
1398 	struct l2cap_ecred_conn_data *conn = data;
1399 	struct pid *pid;
1400 
1401 	if (chan == conn->chan)
1402 		return;
1403 
1404 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1405 		return;
1406 
1407 	pid = chan->ops->get_peer_pid(chan);
1408 
1409 	/* Only add deferred channels with the same PID/PSM */
1410 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1411 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1412 		return;
1413 
1414 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1415 		return;
1416 
1417 	l2cap_ecred_init(chan, 0);
1418 
1419 	/* Set the same ident so we can match on the rsp */
1420 	chan->ident = conn->chan->ident;
1421 
1422 	/* Include all channels deferred */
1423 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1424 
1425 	conn->count++;
1426 }
1427 
1428 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1429 {
1430 	struct l2cap_conn *conn = chan->conn;
1431 	struct l2cap_ecred_conn_data data;
1432 
1433 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1434 		return;
1435 
1436 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1437 		return;
1438 
1439 	l2cap_ecred_init(chan, 0);
1440 
1441 	data.pdu.req.psm     = chan->psm;
1442 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1443 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1444 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1445 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1446 
1447 	chan->ident = l2cap_get_ident(conn);
1448 	data.pid = chan->ops->get_peer_pid(chan);
1449 
1450 	data.count = 1;
1451 	data.chan = chan;
1452 	data.pid = chan->ops->get_peer_pid(chan);
1453 
1454 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1455 
1456 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1457 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1458 		       &data.pdu);
1459 }
1460 
1461 static void l2cap_le_start(struct l2cap_chan *chan)
1462 {
1463 	struct l2cap_conn *conn = chan->conn;
1464 
1465 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1466 		return;
1467 
1468 	if (!chan->psm) {
1469 		l2cap_chan_ready(chan);
1470 		return;
1471 	}
1472 
1473 	if (chan->state == BT_CONNECT) {
1474 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1475 			l2cap_ecred_connect(chan);
1476 		else
1477 			l2cap_le_connect(chan);
1478 	}
1479 }
1480 
1481 static void l2cap_start_connection(struct l2cap_chan *chan)
1482 {
1483 	if (__amp_capable(chan)) {
1484 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1485 		a2mp_discover_amp(chan);
1486 	} else if (chan->conn->hcon->type == LE_LINK) {
1487 		l2cap_le_start(chan);
1488 	} else {
1489 		l2cap_send_conn_req(chan);
1490 	}
1491 }
1492 
1493 static void l2cap_request_info(struct l2cap_conn *conn)
1494 {
1495 	struct l2cap_info_req req;
1496 
1497 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1498 		return;
1499 
1500 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1501 
1502 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1503 	conn->info_ident = l2cap_get_ident(conn);
1504 
1505 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1506 
1507 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1508 		       sizeof(req), &req);
1509 }
1510 
1511 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1512 {
1513 	/* The minimum encryption key size needs to be enforced by the
1514 	 * host stack before establishing any L2CAP connections. The
1515 	 * specification in theory allows a minimum of 1, but to align
1516 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1517 	 *
1518 	 * This check might also be called for unencrypted connections
1519 	 * that have no key size requirements. Ensure that the link is
1520 	 * actually encrypted before enforcing a key size.
1521 	 */
1522 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1523 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1524 }
1525 
1526 static void l2cap_do_start(struct l2cap_chan *chan)
1527 {
1528 	struct l2cap_conn *conn = chan->conn;
1529 
1530 	if (conn->hcon->type == LE_LINK) {
1531 		l2cap_le_start(chan);
1532 		return;
1533 	}
1534 
1535 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1536 		l2cap_request_info(conn);
1537 		return;
1538 	}
1539 
1540 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1541 		return;
1542 
1543 	if (!l2cap_chan_check_security(chan, true) ||
1544 	    !__l2cap_no_conn_pending(chan))
1545 		return;
1546 
1547 	if (l2cap_check_enc_key_size(conn->hcon))
1548 		l2cap_start_connection(chan);
1549 	else
1550 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1551 }
1552 
1553 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1554 {
1555 	u32 local_feat_mask = l2cap_feat_mask;
1556 	if (!disable_ertm)
1557 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1558 
1559 	switch (mode) {
1560 	case L2CAP_MODE_ERTM:
1561 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1562 	case L2CAP_MODE_STREAMING:
1563 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1564 	default:
1565 		return 0x00;
1566 	}
1567 }
1568 
1569 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1570 {
1571 	struct l2cap_conn *conn = chan->conn;
1572 	struct l2cap_disconn_req req;
1573 
1574 	if (!conn)
1575 		return;
1576 
1577 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1578 		__clear_retrans_timer(chan);
1579 		__clear_monitor_timer(chan);
1580 		__clear_ack_timer(chan);
1581 	}
1582 
1583 	if (chan->scid == L2CAP_CID_A2MP) {
1584 		l2cap_state_change(chan, BT_DISCONN);
1585 		return;
1586 	}
1587 
1588 	req.dcid = cpu_to_le16(chan->dcid);
1589 	req.scid = cpu_to_le16(chan->scid);
1590 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1591 		       sizeof(req), &req);
1592 
1593 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1594 }
1595 
1596 /* ---- L2CAP connections ---- */
1597 static void l2cap_conn_start(struct l2cap_conn *conn)
1598 {
1599 	struct l2cap_chan *chan, *tmp;
1600 
1601 	BT_DBG("conn %p", conn);
1602 
1603 	mutex_lock(&conn->chan_lock);
1604 
1605 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1606 		l2cap_chan_lock(chan);
1607 
1608 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1609 			l2cap_chan_ready(chan);
1610 			l2cap_chan_unlock(chan);
1611 			continue;
1612 		}
1613 
1614 		if (chan->state == BT_CONNECT) {
1615 			if (!l2cap_chan_check_security(chan, true) ||
1616 			    !__l2cap_no_conn_pending(chan)) {
1617 				l2cap_chan_unlock(chan);
1618 				continue;
1619 			}
1620 
1621 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1622 			    && test_bit(CONF_STATE2_DEVICE,
1623 					&chan->conf_state)) {
1624 				l2cap_chan_close(chan, ECONNRESET);
1625 				l2cap_chan_unlock(chan);
1626 				continue;
1627 			}
1628 
1629 			if (l2cap_check_enc_key_size(conn->hcon))
1630 				l2cap_start_connection(chan);
1631 			else
1632 				l2cap_chan_close(chan, ECONNREFUSED);
1633 
1634 		} else if (chan->state == BT_CONNECT2) {
1635 			struct l2cap_conn_rsp rsp;
1636 			char buf[128];
1637 			rsp.scid = cpu_to_le16(chan->dcid);
1638 			rsp.dcid = cpu_to_le16(chan->scid);
1639 
1640 			if (l2cap_chan_check_security(chan, false)) {
1641 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1642 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1643 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1644 					chan->ops->defer(chan);
1645 
1646 				} else {
1647 					l2cap_state_change(chan, BT_CONFIG);
1648 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1649 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1650 				}
1651 			} else {
1652 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1653 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1654 			}
1655 
1656 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1657 				       sizeof(rsp), &rsp);
1658 
1659 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1660 			    rsp.result != L2CAP_CR_SUCCESS) {
1661 				l2cap_chan_unlock(chan);
1662 				continue;
1663 			}
1664 
1665 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1666 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1667 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1668 			chan->num_conf_req++;
1669 		}
1670 
1671 		l2cap_chan_unlock(chan);
1672 	}
1673 
1674 	mutex_unlock(&conn->chan_lock);
1675 }
1676 
1677 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1678 {
1679 	struct hci_conn *hcon = conn->hcon;
1680 	struct hci_dev *hdev = hcon->hdev;
1681 
1682 	BT_DBG("%s conn %p", hdev->name, conn);
1683 
1684 	/* For outgoing pairing which doesn't necessarily have an
1685 	 * associated socket (e.g. mgmt_pair_device).
1686 	 */
1687 	if (hcon->out)
1688 		smp_conn_security(hcon, hcon->pending_sec_level);
1689 
1690 	/* For LE slave connections, make sure the connection interval
1691 	 * is in the range of the minimum and maximum interval that has
1692 	 * been configured for this connection. If not, then trigger
1693 	 * the connection update procedure.
1694 	 */
1695 	if (hcon->role == HCI_ROLE_SLAVE &&
1696 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1697 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1698 		struct l2cap_conn_param_update_req req;
1699 
1700 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1701 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1702 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1703 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1704 
1705 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1706 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1707 	}
1708 }
1709 
1710 static void l2cap_conn_ready(struct l2cap_conn *conn)
1711 {
1712 	struct l2cap_chan *chan;
1713 	struct hci_conn *hcon = conn->hcon;
1714 
1715 	BT_DBG("conn %p", conn);
1716 
1717 	if (hcon->type == ACL_LINK)
1718 		l2cap_request_info(conn);
1719 
1720 	mutex_lock(&conn->chan_lock);
1721 
1722 	list_for_each_entry(chan, &conn->chan_l, list) {
1723 
1724 		l2cap_chan_lock(chan);
1725 
1726 		if (chan->scid == L2CAP_CID_A2MP) {
1727 			l2cap_chan_unlock(chan);
1728 			continue;
1729 		}
1730 
1731 		if (hcon->type == LE_LINK) {
1732 			l2cap_le_start(chan);
1733 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1735 				l2cap_chan_ready(chan);
1736 		} else if (chan->state == BT_CONNECT) {
1737 			l2cap_do_start(chan);
1738 		}
1739 
1740 		l2cap_chan_unlock(chan);
1741 	}
1742 
1743 	mutex_unlock(&conn->chan_lock);
1744 
1745 	if (hcon->type == LE_LINK)
1746 		l2cap_le_conn_ready(conn);
1747 
1748 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1749 }
1750 
1751 /* Notify sockets that we cannot guarantee reliability anymore */
1752 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1753 {
1754 	struct l2cap_chan *chan;
1755 
1756 	BT_DBG("conn %p", conn);
1757 
1758 	mutex_lock(&conn->chan_lock);
1759 
1760 	list_for_each_entry(chan, &conn->chan_l, list) {
1761 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1762 			l2cap_chan_set_err(chan, err);
1763 	}
1764 
1765 	mutex_unlock(&conn->chan_lock);
1766 }
1767 
1768 static void l2cap_info_timeout(struct work_struct *work)
1769 {
1770 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1771 					       info_timer.work);
1772 
1773 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1774 	conn->info_ident = 0;
1775 
1776 	l2cap_conn_start(conn);
1777 }
1778 
1779 /*
1780  * l2cap_user
1781  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1782  * callback is called during registration. The ->remove callback is called
1783  * during unregistration.
1784  * An l2cap_user object is unregistered either explicitly or when the
1785  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1786  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1787  * External modules must own a reference to the l2cap_conn object if they intend
1788  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1789  * any time if they don't.
1790  */
1791 
1792 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1793 {
1794 	struct hci_dev *hdev = conn->hcon->hdev;
1795 	int ret;
1796 
1797 	/* We need to check whether l2cap_conn is registered. If it is not, we
1798 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1799 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1800 	 * relies on the parent hci_conn object to be locked. This itself relies
1801 	 * on the hci_dev object to be locked. So we must lock the hci device
1802 	 * here, too. */
1803 
1804 	hci_dev_lock(hdev);
1805 
1806 	if (!list_empty(&user->list)) {
1807 		ret = -EINVAL;
1808 		goto out_unlock;
1809 	}
1810 
1811 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1812 	if (!conn->hchan) {
1813 		ret = -ENODEV;
1814 		goto out_unlock;
1815 	}
1816 
1817 	ret = user->probe(conn, user);
1818 	if (ret)
1819 		goto out_unlock;
1820 
1821 	list_add(&user->list, &conn->users);
1822 	ret = 0;
1823 
1824 out_unlock:
1825 	hci_dev_unlock(hdev);
1826 	return ret;
1827 }
1828 EXPORT_SYMBOL(l2cap_register_user);
1829 
1830 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1831 {
1832 	struct hci_dev *hdev = conn->hcon->hdev;
1833 
1834 	hci_dev_lock(hdev);
1835 
1836 	if (list_empty(&user->list))
1837 		goto out_unlock;
1838 
1839 	list_del_init(&user->list);
1840 	user->remove(conn, user);
1841 
1842 out_unlock:
1843 	hci_dev_unlock(hdev);
1844 }
1845 EXPORT_SYMBOL(l2cap_unregister_user);
1846 
1847 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1848 {
1849 	struct l2cap_user *user;
1850 
1851 	while (!list_empty(&conn->users)) {
1852 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1853 		list_del_init(&user->list);
1854 		user->remove(conn, user);
1855 	}
1856 }
1857 
1858 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1859 {
1860 	struct l2cap_conn *conn = hcon->l2cap_data;
1861 	struct l2cap_chan *chan, *l;
1862 
1863 	if (!conn)
1864 		return;
1865 
1866 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1867 
1868 	kfree_skb(conn->rx_skb);
1869 
1870 	skb_queue_purge(&conn->pending_rx);
1871 
1872 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1873 	 * might block if we are running on a worker from the same workqueue
1874 	 * pending_rx_work is waiting on.
1875 	 */
1876 	if (work_pending(&conn->pending_rx_work))
1877 		cancel_work_sync(&conn->pending_rx_work);
1878 
1879 	if (work_pending(&conn->id_addr_update_work))
1880 		cancel_work_sync(&conn->id_addr_update_work);
1881 
1882 	l2cap_unregister_all_users(conn);
1883 
1884 	/* Force the connection to be immediately dropped */
1885 	hcon->disc_timeout = 0;
1886 
1887 	mutex_lock(&conn->chan_lock);
1888 
1889 	/* Kill channels */
1890 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1891 		l2cap_chan_hold(chan);
1892 		l2cap_chan_lock(chan);
1893 
1894 		l2cap_chan_del(chan, err);
1895 
1896 		chan->ops->close(chan);
1897 
1898 		l2cap_chan_unlock(chan);
1899 		l2cap_chan_put(chan);
1900 	}
1901 
1902 	mutex_unlock(&conn->chan_lock);
1903 
1904 	hci_chan_del(conn->hchan);
1905 
1906 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1907 		cancel_delayed_work_sync(&conn->info_timer);
1908 
1909 	hcon->l2cap_data = NULL;
1910 	conn->hchan = NULL;
1911 	l2cap_conn_put(conn);
1912 }
1913 
1914 static void l2cap_conn_free(struct kref *ref)
1915 {
1916 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1917 
1918 	hci_conn_put(conn->hcon);
1919 	kfree(conn);
1920 }
1921 
1922 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1923 {
1924 	kref_get(&conn->ref);
1925 	return conn;
1926 }
1927 EXPORT_SYMBOL(l2cap_conn_get);
1928 
1929 void l2cap_conn_put(struct l2cap_conn *conn)
1930 {
1931 	kref_put(&conn->ref, l2cap_conn_free);
1932 }
1933 EXPORT_SYMBOL(l2cap_conn_put);
1934 
1935 /* ---- Socket interface ---- */
1936 
1937 /* Find socket with psm and source / destination bdaddr.
1938  * Returns closest match.
1939  */
1940 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1941 						   bdaddr_t *src,
1942 						   bdaddr_t *dst,
1943 						   u8 link_type)
1944 {
1945 	struct l2cap_chan *c, *c1 = NULL;
1946 
1947 	read_lock(&chan_list_lock);
1948 
1949 	list_for_each_entry(c, &chan_list, global_l) {
1950 		if (state && c->state != state)
1951 			continue;
1952 
1953 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1954 			continue;
1955 
1956 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1957 			continue;
1958 
1959 		if (c->psm == psm) {
1960 			int src_match, dst_match;
1961 			int src_any, dst_any;
1962 
1963 			/* Exact match. */
1964 			src_match = !bacmp(&c->src, src);
1965 			dst_match = !bacmp(&c->dst, dst);
1966 			if (src_match && dst_match) {
1967 				l2cap_chan_hold(c);
1968 				read_unlock(&chan_list_lock);
1969 				return c;
1970 			}
1971 
1972 			/* Closest match */
1973 			src_any = !bacmp(&c->src, BDADDR_ANY);
1974 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1975 			if ((src_match && dst_any) || (src_any && dst_match) ||
1976 			    (src_any && dst_any))
1977 				c1 = c;
1978 		}
1979 	}
1980 
1981 	if (c1)
1982 		l2cap_chan_hold(c1);
1983 
1984 	read_unlock(&chan_list_lock);
1985 
1986 	return c1;
1987 }
1988 
1989 static void l2cap_monitor_timeout(struct work_struct *work)
1990 {
1991 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1992 					       monitor_timer.work);
1993 
1994 	BT_DBG("chan %p", chan);
1995 
1996 	l2cap_chan_lock(chan);
1997 
1998 	if (!chan->conn) {
1999 		l2cap_chan_unlock(chan);
2000 		l2cap_chan_put(chan);
2001 		return;
2002 	}
2003 
2004 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2005 
2006 	l2cap_chan_unlock(chan);
2007 	l2cap_chan_put(chan);
2008 }
2009 
2010 static void l2cap_retrans_timeout(struct work_struct *work)
2011 {
2012 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2013 					       retrans_timer.work);
2014 
2015 	BT_DBG("chan %p", chan);
2016 
2017 	l2cap_chan_lock(chan);
2018 
2019 	if (!chan->conn) {
2020 		l2cap_chan_unlock(chan);
2021 		l2cap_chan_put(chan);
2022 		return;
2023 	}
2024 
2025 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2026 	l2cap_chan_unlock(chan);
2027 	l2cap_chan_put(chan);
2028 }
2029 
2030 static void l2cap_streaming_send(struct l2cap_chan *chan,
2031 				 struct sk_buff_head *skbs)
2032 {
2033 	struct sk_buff *skb;
2034 	struct l2cap_ctrl *control;
2035 
2036 	BT_DBG("chan %p, skbs %p", chan, skbs);
2037 
2038 	if (__chan_is_moving(chan))
2039 		return;
2040 
2041 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2042 
2043 	while (!skb_queue_empty(&chan->tx_q)) {
2044 
2045 		skb = skb_dequeue(&chan->tx_q);
2046 
2047 		bt_cb(skb)->l2cap.retries = 1;
2048 		control = &bt_cb(skb)->l2cap;
2049 
2050 		control->reqseq = 0;
2051 		control->txseq = chan->next_tx_seq;
2052 
2053 		__pack_control(chan, control, skb);
2054 
2055 		if (chan->fcs == L2CAP_FCS_CRC16) {
2056 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2057 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2058 		}
2059 
2060 		l2cap_do_send(chan, skb);
2061 
2062 		BT_DBG("Sent txseq %u", control->txseq);
2063 
2064 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2065 		chan->frames_sent++;
2066 	}
2067 }
2068 
2069 static int l2cap_ertm_send(struct l2cap_chan *chan)
2070 {
2071 	struct sk_buff *skb, *tx_skb;
2072 	struct l2cap_ctrl *control;
2073 	int sent = 0;
2074 
2075 	BT_DBG("chan %p", chan);
2076 
2077 	if (chan->state != BT_CONNECTED)
2078 		return -ENOTCONN;
2079 
2080 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2081 		return 0;
2082 
2083 	if (__chan_is_moving(chan))
2084 		return 0;
2085 
2086 	while (chan->tx_send_head &&
2087 	       chan->unacked_frames < chan->remote_tx_win &&
2088 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2089 
2090 		skb = chan->tx_send_head;
2091 
2092 		bt_cb(skb)->l2cap.retries = 1;
2093 		control = &bt_cb(skb)->l2cap;
2094 
2095 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2096 			control->final = 1;
2097 
2098 		control->reqseq = chan->buffer_seq;
2099 		chan->last_acked_seq = chan->buffer_seq;
2100 		control->txseq = chan->next_tx_seq;
2101 
2102 		__pack_control(chan, control, skb);
2103 
2104 		if (chan->fcs == L2CAP_FCS_CRC16) {
2105 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2106 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2107 		}
2108 
2109 		/* Clone after data has been modified. Data is assumed to be
2110 		   read-only (for locking purposes) on cloned sk_buffs.
2111 		 */
2112 		tx_skb = skb_clone(skb, GFP_KERNEL);
2113 
2114 		if (!tx_skb)
2115 			break;
2116 
2117 		__set_retrans_timer(chan);
2118 
2119 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2120 		chan->unacked_frames++;
2121 		chan->frames_sent++;
2122 		sent++;
2123 
2124 		if (skb_queue_is_last(&chan->tx_q, skb))
2125 			chan->tx_send_head = NULL;
2126 		else
2127 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2128 
2129 		l2cap_do_send(chan, tx_skb);
2130 		BT_DBG("Sent txseq %u", control->txseq);
2131 	}
2132 
2133 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2134 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2135 
2136 	return sent;
2137 }
2138 
2139 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2140 {
2141 	struct l2cap_ctrl control;
2142 	struct sk_buff *skb;
2143 	struct sk_buff *tx_skb;
2144 	u16 seq;
2145 
2146 	BT_DBG("chan %p", chan);
2147 
2148 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2149 		return;
2150 
2151 	if (__chan_is_moving(chan))
2152 		return;
2153 
2154 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2155 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2156 
2157 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2158 		if (!skb) {
2159 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2160 			       seq);
2161 			continue;
2162 		}
2163 
2164 		bt_cb(skb)->l2cap.retries++;
2165 		control = bt_cb(skb)->l2cap;
2166 
2167 		if (chan->max_tx != 0 &&
2168 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2169 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2170 			l2cap_send_disconn_req(chan, ECONNRESET);
2171 			l2cap_seq_list_clear(&chan->retrans_list);
2172 			break;
2173 		}
2174 
2175 		control.reqseq = chan->buffer_seq;
2176 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2177 			control.final = 1;
2178 		else
2179 			control.final = 0;
2180 
2181 		if (skb_cloned(skb)) {
2182 			/* Cloned sk_buffs are read-only, so we need a
2183 			 * writeable copy
2184 			 */
2185 			tx_skb = skb_copy(skb, GFP_KERNEL);
2186 		} else {
2187 			tx_skb = skb_clone(skb, GFP_KERNEL);
2188 		}
2189 
2190 		if (!tx_skb) {
2191 			l2cap_seq_list_clear(&chan->retrans_list);
2192 			break;
2193 		}
2194 
2195 		/* Update skb contents */
2196 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2197 			put_unaligned_le32(__pack_extended_control(&control),
2198 					   tx_skb->data + L2CAP_HDR_SIZE);
2199 		} else {
2200 			put_unaligned_le16(__pack_enhanced_control(&control),
2201 					   tx_skb->data + L2CAP_HDR_SIZE);
2202 		}
2203 
2204 		/* Update FCS */
2205 		if (chan->fcs == L2CAP_FCS_CRC16) {
2206 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2207 					tx_skb->len - L2CAP_FCS_SIZE);
2208 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2209 						L2CAP_FCS_SIZE);
2210 		}
2211 
2212 		l2cap_do_send(chan, tx_skb);
2213 
2214 		BT_DBG("Resent txseq %d", control.txseq);
2215 
2216 		chan->last_acked_seq = chan->buffer_seq;
2217 	}
2218 }
2219 
2220 static void l2cap_retransmit(struct l2cap_chan *chan,
2221 			     struct l2cap_ctrl *control)
2222 {
2223 	BT_DBG("chan %p, control %p", chan, control);
2224 
2225 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2226 	l2cap_ertm_resend(chan);
2227 }
2228 
2229 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2230 				 struct l2cap_ctrl *control)
2231 {
2232 	struct sk_buff *skb;
2233 
2234 	BT_DBG("chan %p, control %p", chan, control);
2235 
2236 	if (control->poll)
2237 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2238 
2239 	l2cap_seq_list_clear(&chan->retrans_list);
2240 
2241 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2242 		return;
2243 
2244 	if (chan->unacked_frames) {
2245 		skb_queue_walk(&chan->tx_q, skb) {
2246 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2247 			    skb == chan->tx_send_head)
2248 				break;
2249 		}
2250 
2251 		skb_queue_walk_from(&chan->tx_q, skb) {
2252 			if (skb == chan->tx_send_head)
2253 				break;
2254 
2255 			l2cap_seq_list_append(&chan->retrans_list,
2256 					      bt_cb(skb)->l2cap.txseq);
2257 		}
2258 
2259 		l2cap_ertm_resend(chan);
2260 	}
2261 }
2262 
2263 static void l2cap_send_ack(struct l2cap_chan *chan)
2264 {
2265 	struct l2cap_ctrl control;
2266 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2267 					 chan->last_acked_seq);
2268 	int threshold;
2269 
2270 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2271 	       chan, chan->last_acked_seq, chan->buffer_seq);
2272 
2273 	memset(&control, 0, sizeof(control));
2274 	control.sframe = 1;
2275 
2276 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2277 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2278 		__clear_ack_timer(chan);
2279 		control.super = L2CAP_SUPER_RNR;
2280 		control.reqseq = chan->buffer_seq;
2281 		l2cap_send_sframe(chan, &control);
2282 	} else {
2283 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2284 			l2cap_ertm_send(chan);
2285 			/* If any i-frames were sent, they included an ack */
2286 			if (chan->buffer_seq == chan->last_acked_seq)
2287 				frames_to_ack = 0;
2288 		}
2289 
2290 		/* Ack now if at least 3/4 of the ack window is used.  The
2291 		 * threshold is ack_win * 3 / 4, calculated without mul or div.
2292 		 */
2293 		threshold = chan->ack_win;
2294 		threshold += threshold << 1;
2295 		threshold >>= 2;
2296 
2297 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2298 		       threshold);
2299 
2300 		if (frames_to_ack >= threshold) {
2301 			__clear_ack_timer(chan);
2302 			control.super = L2CAP_SUPER_RR;
2303 			control.reqseq = chan->buffer_seq;
2304 			l2cap_send_sframe(chan, &control);
2305 			frames_to_ack = 0;
2306 		}
2307 
2308 		if (frames_to_ack)
2309 			__set_ack_timer(chan);
2310 	}
2311 }
2312 
2313 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2314 					 struct msghdr *msg, int len,
2315 					 int count, struct sk_buff *skb)
2316 {
2317 	struct l2cap_conn *conn = chan->conn;
2318 	struct sk_buff **frag;
2319 	int sent = 0;
2320 
2321 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2322 		return -EFAULT;
2323 
2324 	sent += count;
2325 	len  -= count;
2326 
2327 	/* Continuation fragments (no L2CAP header) */
2328 	frag = &skb_shinfo(skb)->frag_list;
2329 	while (len) {
2330 		struct sk_buff *tmp;
2331 
2332 		count = min_t(unsigned int, conn->mtu, len);
2333 
2334 		tmp = chan->ops->alloc_skb(chan, 0, count,
2335 					   msg->msg_flags & MSG_DONTWAIT);
2336 		if (IS_ERR(tmp))
2337 			return PTR_ERR(tmp);
2338 
2339 		*frag = tmp;
2340 
2341 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2342 				   &msg->msg_iter))
2343 			return -EFAULT;
2344 
2345 		sent += count;
2346 		len  -= count;
2347 
2348 		skb->len += (*frag)->len;
2349 		skb->data_len += (*frag)->len;
2350 
2351 		frag = &(*frag)->next;
2352 	}
2353 
2354 	return sent;
2355 }
2356 
2357 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2358 						 struct msghdr *msg, size_t len)
2359 {
2360 	struct l2cap_conn *conn = chan->conn;
2361 	struct sk_buff *skb;
2362 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2363 	struct l2cap_hdr *lh;
2364 
2365 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2366 	       __le16_to_cpu(chan->psm), len);
2367 
2368 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2369 
2370 	skb = chan->ops->alloc_skb(chan, hlen, count,
2371 				   msg->msg_flags & MSG_DONTWAIT);
2372 	if (IS_ERR(skb))
2373 		return skb;
2374 
2375 	/* Create L2CAP header */
2376 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2377 	lh->cid = cpu_to_le16(chan->dcid);
2378 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2379 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2380 
2381 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2382 	if (unlikely(err < 0)) {
2383 		kfree_skb(skb);
2384 		return ERR_PTR(err);
2385 	}
2386 	return skb;
2387 }
2388 
2389 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2390 					      struct msghdr *msg, size_t len)
2391 {
2392 	struct l2cap_conn *conn = chan->conn;
2393 	struct sk_buff *skb;
2394 	int err, count;
2395 	struct l2cap_hdr *lh;
2396 
2397 	BT_DBG("chan %p len %zu", chan, len);
2398 
2399 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2400 
2401 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2402 				   msg->msg_flags & MSG_DONTWAIT);
2403 	if (IS_ERR(skb))
2404 		return skb;
2405 
2406 	/* Create L2CAP header */
2407 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2408 	lh->cid = cpu_to_le16(chan->dcid);
2409 	lh->len = cpu_to_le16(len);
2410 
2411 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2412 	if (unlikely(err < 0)) {
2413 		kfree_skb(skb);
2414 		return ERR_PTR(err);
2415 	}
2416 	return skb;
2417 }
2418 
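/* Build a single ERTM/streaming I-frame PDU: basic L2CAP header, a 16-bit
 * enhanced or 32-bit extended control field (zeroed here, filled in by the
 * send path), an optional SDU length field on the first segment of a
 * segmented SDU, then the payload copied from the caller's iovec.  hlen
 * also reserves room for the CRC16 FCS, which is appended later by the
 * ERTM/streaming send path.
 */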
2419 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2420 					       struct msghdr *msg, size_t len,
2421 					       u16 sdulen)
2422 {
2423 	struct l2cap_conn *conn = chan->conn;
2424 	struct sk_buff *skb;
2425 	int err, count, hlen;
2426 	struct l2cap_hdr *lh;
2427 
2428 	BT_DBG("chan %p len %zu", chan, len);
2429 
2430 	if (!conn)
2431 		return ERR_PTR(-ENOTCONN);
2432 
2433 	hlen = __ertm_hdr_size(chan);
2434 
2435 	if (sdulen)
2436 		hlen += L2CAP_SDULEN_SIZE;
2437 
2438 	if (chan->fcs == L2CAP_FCS_CRC16)
2439 		hlen += L2CAP_FCS_SIZE;
2440 
2441 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2442 
2443 	skb = chan->ops->alloc_skb(chan, hlen, count,
2444 				   msg->msg_flags & MSG_DONTWAIT);
2445 	if (IS_ERR(skb))
2446 		return skb;
2447 
2448 	/* Create L2CAP header */
2449 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2450 	lh->cid = cpu_to_le16(chan->dcid);
2451 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2452 
2453 	/* Control header is populated later */
2454 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2455 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2456 	else
2457 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2458 
2459 	if (sdulen)
2460 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2461 
2462 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2463 	if (unlikely(err < 0)) {
2464 		kfree_skb(skb);
2465 		return ERR_PTR(err);
2466 	}
2467 
2468 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2469 	bt_cb(skb)->l2cap.retries = 0;
2470 	return skb;
2471 }
2472 
2473 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2474 			     struct sk_buff_head *seg_queue,
2475 			     struct msghdr *msg, size_t len)
2476 {
2477 	struct sk_buff *skb;
2478 	u16 sdu_len;
2479 	size_t pdu_len;
2480 	u8 sar;
2481 
2482 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2483 
2484 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2485 	 * so fragmented skbs are not used.  The HCI layer's handling
2486 	 * of fragmented skbs is not compatible with ERTM's queueing.
2487 	 */
2488 
2489 	/* PDU size is derived from the HCI MTU */
2490 	pdu_len = chan->conn->mtu;
2491 
2492 	/* Constrain PDU size for BR/EDR connections */
2493 	if (!chan->hs_hcon)
2494 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2495 
2496 	/* Adjust for largest possible L2CAP overhead. */
2497 	if (chan->fcs)
2498 		pdu_len -= L2CAP_FCS_SIZE;
2499 
2500 	pdu_len -= __ertm_hdr_size(chan);
2501 
2502 	/* Remote device may have requested smaller PDUs */
2503 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2504 
2505 	if (len <= pdu_len) {
2506 		sar = L2CAP_SAR_UNSEGMENTED;
2507 		sdu_len = 0;
2508 		pdu_len = len;
2509 	} else {
2510 		sar = L2CAP_SAR_START;
2511 		sdu_len = len;
2512 	}
2513 
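	/* Example: with an effective pdu_len of 400 bytes, a 1000 byte SDU is
	 * emitted as SAR_START (400 bytes of payload plus the SDU length
	 * field), SAR_CONTINUE (400 bytes) and SAR_END (200 bytes); an SDU
	 * that fits in one PDU goes out as a single SAR_UNSEGMENTED frame.
	 */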
2514 	while (len > 0) {
2515 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2516 
2517 		if (IS_ERR(skb)) {
2518 			__skb_queue_purge(seg_queue);
2519 			return PTR_ERR(skb);
2520 		}
2521 
2522 		bt_cb(skb)->l2cap.sar = sar;
2523 		__skb_queue_tail(seg_queue, skb);
2524 
2525 		len -= pdu_len;
2526 		if (sdu_len)
2527 			sdu_len = 0;
2528 
2529 		if (len <= pdu_len) {
2530 			sar = L2CAP_SAR_END;
2531 			pdu_len = len;
2532 		} else {
2533 			sar = L2CAP_SAR_CONTINUE;
2534 		}
2535 	}
2536 
2537 	return 0;
2538 }
2539 
2540 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2541 						   struct msghdr *msg,
2542 						   size_t len, u16 sdulen)
2543 {
2544 	struct l2cap_conn *conn = chan->conn;
2545 	struct sk_buff *skb;
2546 	int err, count, hlen;
2547 	struct l2cap_hdr *lh;
2548 
2549 	BT_DBG("chan %p len %zu", chan, len);
2550 
2551 	if (!conn)
2552 		return ERR_PTR(-ENOTCONN);
2553 
2554 	hlen = L2CAP_HDR_SIZE;
2555 
2556 	if (sdulen)
2557 		hlen += L2CAP_SDULEN_SIZE;
2558 
2559 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2560 
2561 	skb = chan->ops->alloc_skb(chan, hlen, count,
2562 				   msg->msg_flags & MSG_DONTWAIT);
2563 	if (IS_ERR(skb))
2564 		return skb;
2565 
2566 	/* Create L2CAP header */
2567 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2568 	lh->cid = cpu_to_le16(chan->dcid);
2569 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2570 
2571 	if (sdulen)
2572 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2573 
2574 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2575 	if (unlikely(err < 0)) {
2576 		kfree_skb(skb);
2577 		return ERR_PTR(err);
2578 	}
2579 
2580 	return skb;
2581 }
2582 
2583 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2584 				struct sk_buff_head *seg_queue,
2585 				struct msghdr *msg, size_t len)
2586 {
2587 	struct sk_buff *skb;
2588 	size_t pdu_len;
2589 	u16 sdu_len;
2590 
2591 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2592 
2593 	sdu_len = len;
2594 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
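	/* The first PDU carries the 16-bit SDU length field, so its payload is
	 * remote_mps - 2 bytes; once sdu_len is cleared below, the remaining
	 * PDUs can each carry the full remote_mps bytes.
	 */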
2595 
2596 	while (len > 0) {
2597 		if (len <= pdu_len)
2598 			pdu_len = len;
2599 
2600 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2601 		if (IS_ERR(skb)) {
2602 			__skb_queue_purge(seg_queue);
2603 			return PTR_ERR(skb);
2604 		}
2605 
2606 		__skb_queue_tail(seg_queue, skb);
2607 
2608 		len -= pdu_len;
2609 
2610 		if (sdu_len) {
2611 			sdu_len = 0;
2612 			pdu_len += L2CAP_SDULEN_SIZE;
2613 		}
2614 	}
2615 
2616 	return 0;
2617 }
2618 
2619 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2620 {
2621 	int sent = 0;
2622 
2623 	BT_DBG("chan %p", chan);
2624 
2625 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2626 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2627 		chan->tx_credits--;
2628 		sent++;
2629 	}
2630 
2631 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2632 	       skb_queue_len(&chan->tx_q));
2633 }
2634 
2635 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2636 {
2637 	struct sk_buff *skb;
2638 	int err;
2639 	struct sk_buff_head seg_queue;
2640 
2641 	if (!chan->conn)
2642 		return -ENOTCONN;
2643 
2644 	/* Connectionless channel */
2645 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2646 		skb = l2cap_create_connless_pdu(chan, msg, len);
2647 		if (IS_ERR(skb))
2648 			return PTR_ERR(skb);
2649 
2650 		/* Channel lock is released before requesting new skb and then
2651 		 * reacquired, so we need to recheck the channel state.
2652 		 */
2653 		if (chan->state != BT_CONNECTED) {
2654 			kfree_skb(skb);
2655 			return -ENOTCONN;
2656 		}
2657 
2658 		l2cap_do_send(chan, skb);
2659 		return len;
2660 	}
2661 
2662 	switch (chan->mode) {
2663 	case L2CAP_MODE_LE_FLOWCTL:
2664 	case L2CAP_MODE_EXT_FLOWCTL:
2665 		/* Check outgoing MTU */
2666 		if (len > chan->omtu)
2667 			return -EMSGSIZE;
2668 
2669 		__skb_queue_head_init(&seg_queue);
2670 
2671 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2672 
2673 		if (chan->state != BT_CONNECTED) {
2674 			__skb_queue_purge(&seg_queue);
2675 			err = -ENOTCONN;
2676 		}
2677 
2678 		if (err)
2679 			return err;
2680 
2681 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2682 
2683 		l2cap_le_flowctl_send(chan);
2684 
2685 		if (!chan->tx_credits)
2686 			chan->ops->suspend(chan);
2687 
2688 		err = len;
2689 
2690 		break;
2691 
2692 	case L2CAP_MODE_BASIC:
2693 		/* Check outgoing MTU */
2694 		if (len > chan->omtu)
2695 			return -EMSGSIZE;
2696 
2697 		/* Create a basic PDU */
2698 		skb = l2cap_create_basic_pdu(chan, msg, len);
2699 		if (IS_ERR(skb))
2700 			return PTR_ERR(skb);
2701 
2702 		/* Channel lock is released before requesting new skb and then
2703 		 * reacquired, so we need to recheck the channel state.
2704 		 */
2705 		if (chan->state != BT_CONNECTED) {
2706 			kfree_skb(skb);
2707 			return -ENOTCONN;
2708 		}
2709 
2710 		l2cap_do_send(chan, skb);
2711 		err = len;
2712 		break;
2713 
2714 	case L2CAP_MODE_ERTM:
2715 	case L2CAP_MODE_STREAMING:
2716 		/* Check outgoing MTU */
2717 		if (len > chan->omtu) {
2718 			err = -EMSGSIZE;
2719 			break;
2720 		}
2721 
2722 		__skb_queue_head_init(&seg_queue);
2723 
2724 		/* Do segmentation before calling in to the state machine,
2725 		 * since it's possible to block while waiting for memory
2726 		 * allocation.
2727 		 */
2728 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2729 
2730 		/* The channel could have been closed while segmenting,
2731 		 * check that it is still connected.
2732 		 */
2733 		if (chan->state != BT_CONNECTED) {
2734 			__skb_queue_purge(&seg_queue);
2735 			err = -ENOTCONN;
2736 		}
2737 
2738 		if (err)
2739 			break;
2740 
2741 		if (chan->mode == L2CAP_MODE_ERTM)
2742 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2743 		else
2744 			l2cap_streaming_send(chan, &seg_queue);
2745 
2746 		err = len;
2747 
2748 		/* If the skbs were not queued for sending, they'll still be in
2749 		 * seg_queue and need to be purged.
2750 		 */
2751 		__skb_queue_purge(&seg_queue);
2752 		break;
2753 
2754 	default:
2755 		BT_DBG("bad state %1.1x", chan->mode);
2756 		err = -EBADFD;
2757 	}
2758 
2759 	return err;
2760 }
2761 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2762 
2763 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2764 {
2765 	struct l2cap_ctrl control;
2766 	u16 seq;
2767 
2768 	BT_DBG("chan %p, txseq %u", chan, txseq);
2769 
2770 	memset(&control, 0, sizeof(control));
2771 	control.sframe = 1;
2772 	control.super = L2CAP_SUPER_SREJ;
2773 
2774 	for (seq = chan->expected_tx_seq; seq != txseq;
2775 	     seq = __next_seq(chan, seq)) {
2776 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2777 			control.reqseq = seq;
2778 			l2cap_send_sframe(chan, &control);
2779 			l2cap_seq_list_append(&chan->srej_list, seq);
2780 		}
2781 	}
2782 
2783 	chan->expected_tx_seq = __next_seq(chan, txseq);
2784 }
2785 
2786 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2787 {
2788 	struct l2cap_ctrl control;
2789 
2790 	BT_DBG("chan %p", chan);
2791 
2792 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2793 		return;
2794 
2795 	memset(&control, 0, sizeof(control));
2796 	control.sframe = 1;
2797 	control.super = L2CAP_SUPER_SREJ;
2798 	control.reqseq = chan->srej_list.tail;
2799 	l2cap_send_sframe(chan, &control);
2800 }
2801 
2802 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2803 {
2804 	struct l2cap_ctrl control;
2805 	u16 initial_head;
2806 	u16 seq;
2807 
2808 	BT_DBG("chan %p, txseq %u", chan, txseq);
2809 
2810 	memset(&control, 0, sizeof(control));
2811 	control.sframe = 1;
2812 	control.super = L2CAP_SUPER_SREJ;
2813 
2814 	/* Capture initial list head to allow only one pass through the list. */
2815 	initial_head = chan->srej_list.head;
2816 
2817 	do {
2818 		seq = l2cap_seq_list_pop(&chan->srej_list);
2819 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2820 			break;
2821 
2822 		control.reqseq = seq;
2823 		l2cap_send_sframe(chan, &control);
2824 		l2cap_seq_list_append(&chan->srej_list, seq);
2825 	} while (chan->srej_list.head != initial_head);
2826 }
2827 
2828 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2829 {
2830 	struct sk_buff *acked_skb;
2831 	u16 ackseq;
2832 
2833 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2834 
2835 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2836 		return;
2837 
2838 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2839 	       chan->expected_ack_seq, chan->unacked_frames);
2840 
2841 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2842 	     ackseq = __next_seq(chan, ackseq)) {
2843 
2844 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2845 		if (acked_skb) {
2846 			skb_unlink(acked_skb, &chan->tx_q);
2847 			kfree_skb(acked_skb);
2848 			chan->unacked_frames--;
2849 		}
2850 	}
2851 
2852 	chan->expected_ack_seq = reqseq;
2853 
2854 	if (chan->unacked_frames == 0)
2855 		__clear_retrans_timer(chan);
2856 
2857 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2858 }
2859 
2860 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2861 {
2862 	BT_DBG("chan %p", chan);
2863 
2864 	chan->expected_tx_seq = chan->buffer_seq;
2865 	l2cap_seq_list_clear(&chan->srej_list);
2866 	skb_queue_purge(&chan->srej_q);
2867 	chan->rx_state = L2CAP_RX_STATE_RECV;
2868 }
2869 
2870 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2871 				struct l2cap_ctrl *control,
2872 				struct sk_buff_head *skbs, u8 event)
2873 {
2874 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2875 	       event);
2876 
2877 	switch (event) {
2878 	case L2CAP_EV_DATA_REQUEST:
2879 		if (chan->tx_send_head == NULL)
2880 			chan->tx_send_head = skb_peek(skbs);
2881 
2882 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2883 		l2cap_ertm_send(chan);
2884 		break;
2885 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2886 		BT_DBG("Enter LOCAL_BUSY");
2887 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2888 
2889 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2890 			/* The SREJ_SENT state must be aborted if we are to
2891 			 * enter the LOCAL_BUSY state.
2892 			 */
2893 			l2cap_abort_rx_srej_sent(chan);
2894 		}
2895 
2896 		l2cap_send_ack(chan);
2897 
2898 		break;
2899 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2900 		BT_DBG("Exit LOCAL_BUSY");
2901 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2902 
2903 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2904 			struct l2cap_ctrl local_control;
2905 
2906 			memset(&local_control, 0, sizeof(local_control));
2907 			local_control.sframe = 1;
2908 			local_control.super = L2CAP_SUPER_RR;
2909 			local_control.poll = 1;
2910 			local_control.reqseq = chan->buffer_seq;
2911 			l2cap_send_sframe(chan, &local_control);
2912 
2913 			chan->retry_count = 1;
2914 			__set_monitor_timer(chan);
2915 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2916 		}
2917 		break;
2918 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2919 		l2cap_process_reqseq(chan, control->reqseq);
2920 		break;
2921 	case L2CAP_EV_EXPLICIT_POLL:
2922 		l2cap_send_rr_or_rnr(chan, 1);
2923 		chan->retry_count = 1;
2924 		__set_monitor_timer(chan);
2925 		__clear_ack_timer(chan);
2926 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2927 		break;
2928 	case L2CAP_EV_RETRANS_TO:
2929 		l2cap_send_rr_or_rnr(chan, 1);
2930 		chan->retry_count = 1;
2931 		__set_monitor_timer(chan);
2932 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2933 		break;
2934 	case L2CAP_EV_RECV_FBIT:
2935 		/* Nothing to process */
2936 		break;
2937 	default:
2938 		break;
2939 	}
2940 }
2941 
2942 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2943 				  struct l2cap_ctrl *control,
2944 				  struct sk_buff_head *skbs, u8 event)
2945 {
2946 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2947 	       event);
2948 
2949 	switch (event) {
2950 	case L2CAP_EV_DATA_REQUEST:
2951 		if (chan->tx_send_head == NULL)
2952 			chan->tx_send_head = skb_peek(skbs);
2953 		/* Queue data, but don't send. */
2954 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2955 		break;
2956 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2957 		BT_DBG("Enter LOCAL_BUSY");
2958 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2959 
2960 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2961 			/* The SREJ_SENT state must be aborted if we are to
2962 			 * enter the LOCAL_BUSY state.
2963 			 */
2964 			l2cap_abort_rx_srej_sent(chan);
2965 		}
2966 
2967 		l2cap_send_ack(chan);
2968 
2969 		break;
2970 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2971 		BT_DBG("Exit LOCAL_BUSY");
2972 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2973 
2974 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2975 			struct l2cap_ctrl local_control;
2976 			memset(&local_control, 0, sizeof(local_control));
2977 			local_control.sframe = 1;
2978 			local_control.super = L2CAP_SUPER_RR;
2979 			local_control.poll = 1;
2980 			local_control.reqseq = chan->buffer_seq;
2981 			l2cap_send_sframe(chan, &local_control);
2982 
2983 			chan->retry_count = 1;
2984 			__set_monitor_timer(chan);
2985 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2986 		}
2987 		break;
2988 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2989 		l2cap_process_reqseq(chan, control->reqseq);
2990 		fallthrough;
2991 
2992 	case L2CAP_EV_RECV_FBIT:
2993 		if (control && control->final) {
2994 			__clear_monitor_timer(chan);
2995 			if (chan->unacked_frames > 0)
2996 				__set_retrans_timer(chan);
2997 			chan->retry_count = 0;
2998 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2999 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3000 		}
3001 		break;
3002 	case L2CAP_EV_EXPLICIT_POLL:
3003 		/* Ignore */
3004 		break;
3005 	case L2CAP_EV_MONITOR_TO:
3006 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3007 			l2cap_send_rr_or_rnr(chan, 1);
3008 			__set_monitor_timer(chan);
3009 			chan->retry_count++;
3010 		} else {
3011 			l2cap_send_disconn_req(chan, ECONNABORTED);
3012 		}
3013 		break;
3014 	default:
3015 		break;
3016 	}
3017 }
3018 
3019 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3020 		     struct sk_buff_head *skbs, u8 event)
3021 {
3022 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3023 	       chan, control, skbs, event, chan->tx_state);
3024 
3025 	switch (chan->tx_state) {
3026 	case L2CAP_TX_STATE_XMIT:
3027 		l2cap_tx_state_xmit(chan, control, skbs, event);
3028 		break;
3029 	case L2CAP_TX_STATE_WAIT_F:
3030 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3031 		break;
3032 	default:
3033 		/* Ignore event */
3034 		break;
3035 	}
3036 }
3037 
3038 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3039 			     struct l2cap_ctrl *control)
3040 {
3041 	BT_DBG("chan %p, control %p", chan, control);
3042 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3043 }
3044 
3045 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3046 				  struct l2cap_ctrl *control)
3047 {
3048 	BT_DBG("chan %p, control %p", chan, control);
3049 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3050 }
3051 
3052 /* Copy frame to all raw sockets on that connection */
3053 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3054 {
3055 	struct sk_buff *nskb;
3056 	struct l2cap_chan *chan;
3057 
3058 	BT_DBG("conn %p", conn);
3059 
3060 	mutex_lock(&conn->chan_lock);
3061 
3062 	list_for_each_entry(chan, &conn->chan_l, list) {
3063 		if (chan->chan_type != L2CAP_CHAN_RAW)
3064 			continue;
3065 
3066 		/* Don't send frame to the channel it came from */
3067 		if (bt_cb(skb)->l2cap.chan == chan)
3068 			continue;
3069 
3070 		nskb = skb_clone(skb, GFP_KERNEL);
3071 		if (!nskb)
3072 			continue;
3073 		if (chan->ops->recv(chan, nskb))
3074 			kfree_skb(nskb);
3075 	}
3076 
3077 	mutex_unlock(&conn->chan_lock);
3078 }
3079 
3080 /* ---- L2CAP signalling commands ---- */
3081 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3082 				       u8 ident, u16 dlen, void *data)
3083 {
3084 	struct sk_buff *skb, **frag;
3085 	struct l2cap_cmd_hdr *cmd;
3086 	struct l2cap_hdr *lh;
3087 	int len, count;
3088 
3089 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3090 	       conn, code, ident, dlen);
3091 
3092 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3093 		return NULL;
3094 
3095 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3096 	count = min_t(unsigned int, conn->mtu, len);
3097 
3098 	skb = bt_skb_alloc(count, GFP_KERNEL);
3099 	if (!skb)
3100 		return NULL;
3101 
3102 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3103 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3104 
3105 	if (conn->hcon->type == LE_LINK)
3106 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3107 	else
3108 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3109 
3110 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3111 	cmd->code  = code;
3112 	cmd->ident = ident;
3113 	cmd->len   = cpu_to_le16(dlen);
3114 
3115 	if (dlen) {
3116 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3117 		skb_put_data(skb, data, count);
3118 		data += count;
3119 	}
3120 
3121 	len -= skb->len;
3122 
3123 	/* Continuation fragments (no L2CAP header) */
3124 	frag = &skb_shinfo(skb)->frag_list;
3125 	while (len) {
3126 		count = min_t(unsigned int, conn->mtu, len);
3127 
3128 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3129 		if (!*frag)
3130 			goto fail;
3131 
3132 		skb_put_data(*frag, data, count);
3133 
3134 		len  -= count;
3135 		data += count;
3136 
3137 		frag = &(*frag)->next;
3138 	}
3139 
3140 	return skb;
3141 
3142 fail:
3143 	kfree_skb(skb);
3144 	return NULL;
3145 }
3146 
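/* Configuration options are encoded as type/length/value triplets.  Values
 * of 1, 2 or 4 bytes are read and written as little-endian integers; larger
 * values (e.g. the RFC or EFS structures) are passed around by pointer.
 */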
3147 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3148 				     unsigned long *val)
3149 {
3150 	struct l2cap_conf_opt *opt = *ptr;
3151 	int len;
3152 
3153 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3154 	*ptr += len;
3155 
3156 	*type = opt->type;
3157 	*olen = opt->len;
3158 
3159 	switch (opt->len) {
3160 	case 1:
3161 		*val = *((u8 *) opt->val);
3162 		break;
3163 
3164 	case 2:
3165 		*val = get_unaligned_le16(opt->val);
3166 		break;
3167 
3168 	case 4:
3169 		*val = get_unaligned_le32(opt->val);
3170 		break;
3171 
3172 	default:
3173 		*val = (unsigned long) opt->val;
3174 		break;
3175 	}
3176 
3177 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3178 	return len;
3179 }
3180 
3181 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3182 {
3183 	struct l2cap_conf_opt *opt = *ptr;
3184 
3185 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3186 
3187 	if (size < L2CAP_CONF_OPT_SIZE + len)
3188 		return;
3189 
3190 	opt->type = type;
3191 	opt->len  = len;
3192 
3193 	switch (len) {
3194 	case 1:
3195 		*((u8 *) opt->val)  = val;
3196 		break;
3197 
3198 	case 2:
3199 		put_unaligned_le16(val, opt->val);
3200 		break;
3201 
3202 	case 4:
3203 		put_unaligned_le32(val, opt->val);
3204 		break;
3205 
3206 	default:
3207 		memcpy(opt->val, (void *) val, len);
3208 		break;
3209 	}
3210 
3211 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3212 }
3213 
3214 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3215 {
3216 	struct l2cap_conf_efs efs;
3217 
3218 	switch (chan->mode) {
3219 	case L2CAP_MODE_ERTM:
3220 		efs.id		= chan->local_id;
3221 		efs.stype	= chan->local_stype;
3222 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3223 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3224 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3225 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3226 		break;
3227 
3228 	case L2CAP_MODE_STREAMING:
3229 		efs.id		= 1;
3230 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3231 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3232 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3233 		efs.acc_lat	= 0;
3234 		efs.flush_to	= 0;
3235 		break;
3236 
3237 	default:
3238 		return;
3239 	}
3240 
3241 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3242 			   (unsigned long) &efs, size);
3243 }
3244 
3245 static void l2cap_ack_timeout(struct work_struct *work)
3246 {
3247 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3248 					       ack_timer.work);
3249 	u16 frames_to_ack;
3250 
3251 	BT_DBG("chan %p", chan);
3252 
3253 	l2cap_chan_lock(chan);
3254 
3255 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3256 				     chan->last_acked_seq);
3257 
3258 	if (frames_to_ack)
3259 		l2cap_send_rr_or_rnr(chan, 0);
3260 
3261 	l2cap_chan_unlock(chan);
3262 	l2cap_chan_put(chan);
3263 }
3264 
3265 int l2cap_ertm_init(struct l2cap_chan *chan)
3266 {
3267 	int err;
3268 
3269 	chan->next_tx_seq = 0;
3270 	chan->expected_tx_seq = 0;
3271 	chan->expected_ack_seq = 0;
3272 	chan->unacked_frames = 0;
3273 	chan->buffer_seq = 0;
3274 	chan->frames_sent = 0;
3275 	chan->last_acked_seq = 0;
3276 	chan->sdu = NULL;
3277 	chan->sdu_last_frag = NULL;
3278 	chan->sdu_len = 0;
3279 
3280 	skb_queue_head_init(&chan->tx_q);
3281 
3282 	chan->local_amp_id = AMP_ID_BREDR;
3283 	chan->move_id = AMP_ID_BREDR;
3284 	chan->move_state = L2CAP_MOVE_STABLE;
3285 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3286 
3287 	if (chan->mode != L2CAP_MODE_ERTM)
3288 		return 0;
3289 
3290 	chan->rx_state = L2CAP_RX_STATE_RECV;
3291 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3292 
3293 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3294 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3295 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3296 
3297 	skb_queue_head_init(&chan->srej_q);
3298 
3299 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3300 	if (err < 0)
3301 		return err;
3302 
3303 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3304 	if (err < 0)
3305 		l2cap_seq_list_free(&chan->srej_list);
3306 
3307 	return err;
3308 }
3309 
3310 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3311 {
3312 	switch (mode) {
3313 	case L2CAP_MODE_STREAMING:
3314 	case L2CAP_MODE_ERTM:
3315 		if (l2cap_mode_supported(mode, remote_feat_mask))
3316 			return mode;
3317 		fallthrough;
3318 	default:
3319 		return L2CAP_MODE_BASIC;
3320 	}
3321 }
3322 
3323 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3324 {
3325 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3326 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3327 }
3328 
3329 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3330 {
3331 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3332 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3333 }
3334 
3335 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3336 				      struct l2cap_conf_rfc *rfc)
3337 {
3338 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3339 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3340 
3341 		/* Class 1 devices must have ERTM timeouts
3342 		 * exceeding the Link Supervision Timeout.  The
3343 		 * default Link Supervision Timeout for AMP
3344 		 * controllers is 10 seconds.
3345 		 *
3346 		 * Class 1 devices use 0xffffffff for their
3347 		 * best-effort flush timeout, so the clamping logic
3348 		 * will result in a timeout that meets the above
3349 		 * requirement.  ERTM timeouts are 16-bit values, so
3350 		 * the maximum timeout is 65.535 seconds.
3351 		 */
3352 
3353 		/* Convert timeout to milliseconds and round */
3354 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3355 
3356 		/* This is the recommended formula for class 2 devices
3357 		 * that start ERTM timers when packets are sent to the
3358 		 * controller.
3359 		 */
3360 		ertm_to = 3 * ertm_to + 500;
3361 
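		/* With the 0xffffffff us best-effort flush timeout used by
		 * class 1 devices this works out to roughly 3 * 4294968 + 500 ms,
		 * so the clamp below caps it at the 16-bit maximum of 65535 ms.
		 */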
3362 		if (ertm_to > 0xffff)
3363 			ertm_to = 0xffff;
3364 
3365 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3366 		rfc->monitor_timeout = rfc->retrans_timeout;
3367 	} else {
3368 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3369 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3370 	}
3371 }
3372 
3373 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3374 {
3375 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3376 	    __l2cap_ews_supported(chan->conn)) {
3377 		/* use extended control field */
3378 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3379 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3380 	} else {
3381 		chan->tx_win = min_t(u16, chan->tx_win,
3382 				     L2CAP_DEFAULT_TX_WINDOW);
3383 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3384 	}
3385 	chan->ack_win = chan->tx_win;
3386 }
3387 
3388 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3389 {
3390 	struct hci_conn *conn = chan->conn->hcon;
3391 
3392 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3393 
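	/* For EDR ACL packets the pkt_type bits mark packet types that shall
	 * NOT be used, so a cleared bit means the type is available.  Each
	 * check below bumps imtu to that packet's maximum payload, leaving
	 * imtu at the payload size of the largest usable packet type.
	 */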
3394 	/* The 2-DH1 packet has between 2 and 56 information bytes
3395 	 * (including the 2-byte payload header)
3396 	 */
3397 	if (!(conn->pkt_type & HCI_2DH1))
3398 		chan->imtu = 54;
3399 
3400 	/* The 3-DH1 packet has between 2 and 85 information bytes
3401 	 * (including the 2-byte payload header)
3402 	 */
3403 	if (!(conn->pkt_type & HCI_3DH1))
3404 		chan->imtu = 83;
3405 
3406 	/* The 2-DH3 packet has between 2 and 369 information bytes
3407 	 * (including the 2-byte payload header)
3408 	 */
3409 	if (!(conn->pkt_type & HCI_2DH3))
3410 		chan->imtu = 367;
3411 
3412 	/* The 3-DH3 packet has between 2 and 554 information bytes
3413 	 * (including the 2-byte payload header)
3414 	 */
3415 	if (!(conn->pkt_type & HCI_3DH3))
3416 		chan->imtu = 552;
3417 
3418 	/* The 2-DH5 packet has between 2 and 681 information bytes
3419 	 * (including the 2-byte payload header)
3420 	 */
3421 	if (!(conn->pkt_type & HCI_2DH5))
3422 		chan->imtu = 679;
3423 
3424 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3425 	 * (including the 2-byte payload header)
3426 	 */
3427 	if (!(conn->pkt_type & HCI_3DH5))
3428 		chan->imtu = 1021;
3429 }
3430 
3431 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3432 {
3433 	struct l2cap_conf_req *req = data;
3434 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3435 	void *ptr = req->data;
3436 	void *endptr = data + data_size;
3437 	u16 size;
3438 
3439 	BT_DBG("chan %p", chan);
3440 
3441 	if (chan->num_conf_req || chan->num_conf_rsp)
3442 		goto done;
3443 
3444 	switch (chan->mode) {
3445 	case L2CAP_MODE_STREAMING:
3446 	case L2CAP_MODE_ERTM:
3447 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3448 			break;
3449 
3450 		if (__l2cap_efs_supported(chan->conn))
3451 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3452 
3453 		fallthrough;
3454 	default:
3455 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3456 		break;
3457 	}
3458 
3459 done:
3460 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3461 		if (!chan->imtu)
3462 			l2cap_mtu_auto(chan);
3463 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3464 				   endptr - ptr);
3465 	}
3466 
3467 	switch (chan->mode) {
3468 	case L2CAP_MODE_BASIC:
3469 		if (disable_ertm)
3470 			break;
3471 
3472 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3473 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3474 			break;
3475 
3476 		rfc.mode            = L2CAP_MODE_BASIC;
3477 		rfc.txwin_size      = 0;
3478 		rfc.max_transmit    = 0;
3479 		rfc.retrans_timeout = 0;
3480 		rfc.monitor_timeout = 0;
3481 		rfc.max_pdu_size    = 0;
3482 
3483 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3484 				   (unsigned long) &rfc, endptr - ptr);
3485 		break;
3486 
3487 	case L2CAP_MODE_ERTM:
3488 		rfc.mode            = L2CAP_MODE_ERTM;
3489 		rfc.max_transmit    = chan->max_tx;
3490 
3491 		__l2cap_set_ertm_timeouts(chan, &rfc);
3492 
3493 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3494 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3495 			     L2CAP_FCS_SIZE);
3496 		rfc.max_pdu_size = cpu_to_le16(size);
3497 
3498 		l2cap_txwin_setup(chan);
3499 
3500 		rfc.txwin_size = min_t(u16, chan->tx_win,
3501 				       L2CAP_DEFAULT_TX_WINDOW);
3502 
3503 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3504 				   (unsigned long) &rfc, endptr - ptr);
3505 
3506 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3507 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3508 
3509 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3510 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3511 					   chan->tx_win, endptr - ptr);
3512 
3513 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3514 			if (chan->fcs == L2CAP_FCS_NONE ||
3515 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3516 				chan->fcs = L2CAP_FCS_NONE;
3517 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3518 						   chan->fcs, endptr - ptr);
3519 			}
3520 		break;
3521 
3522 	case L2CAP_MODE_STREAMING:
3523 		l2cap_txwin_setup(chan);
3524 		rfc.mode            = L2CAP_MODE_STREAMING;
3525 		rfc.txwin_size      = 0;
3526 		rfc.max_transmit    = 0;
3527 		rfc.retrans_timeout = 0;
3528 		rfc.monitor_timeout = 0;
3529 
3530 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3531 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3532 			     L2CAP_FCS_SIZE);
3533 		rfc.max_pdu_size = cpu_to_le16(size);
3534 
3535 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3536 				   (unsigned long) &rfc, endptr - ptr);
3537 
3538 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3539 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3540 
3541 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3542 			if (chan->fcs == L2CAP_FCS_NONE ||
3543 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3544 				chan->fcs = L2CAP_FCS_NONE;
3545 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3546 						   chan->fcs, endptr - ptr);
3547 			}
3548 		break;
3549 	}
3550 
3551 	req->dcid  = cpu_to_le16(chan->dcid);
3552 	req->flags = cpu_to_le16(0);
3553 
3554 	return ptr - data;
3555 }
3556 
3557 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3558 {
3559 	struct l2cap_conf_rsp *rsp = data;
3560 	void *ptr = rsp->data;
3561 	void *endptr = data + data_size;
3562 	void *req = chan->conf_req;
3563 	int len = chan->conf_len;
3564 	int type, hint, olen;
3565 	unsigned long val;
3566 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3567 	struct l2cap_conf_efs efs;
3568 	u8 remote_efs = 0;
3569 	u16 mtu = L2CAP_DEFAULT_MTU;
3570 	u16 result = L2CAP_CONF_SUCCESS;
3571 	u16 size;
3572 
3573 	BT_DBG("chan %p", chan);
3574 
3575 	while (len >= L2CAP_CONF_OPT_SIZE) {
3576 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3577 		if (len < 0)
3578 			break;
3579 
3580 		hint  = type & L2CAP_CONF_HINT;
3581 		type &= L2CAP_CONF_MASK;
3582 
3583 		switch (type) {
3584 		case L2CAP_CONF_MTU:
3585 			if (olen != 2)
3586 				break;
3587 			mtu = val;
3588 			break;
3589 
3590 		case L2CAP_CONF_FLUSH_TO:
3591 			if (olen != 2)
3592 				break;
3593 			chan->flush_to = val;
3594 			break;
3595 
3596 		case L2CAP_CONF_QOS:
3597 			break;
3598 
3599 		case L2CAP_CONF_RFC:
3600 			if (olen != sizeof(rfc))
3601 				break;
3602 			memcpy(&rfc, (void *) val, olen);
3603 			break;
3604 
3605 		case L2CAP_CONF_FCS:
3606 			if (olen != 1)
3607 				break;
3608 			if (val == L2CAP_FCS_NONE)
3609 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3610 			break;
3611 
3612 		case L2CAP_CONF_EFS:
3613 			if (olen != sizeof(efs))
3614 				break;
3615 			remote_efs = 1;
3616 			memcpy(&efs, (void *) val, olen);
3617 			break;
3618 
3619 		case L2CAP_CONF_EWS:
3620 			if (olen != 2)
3621 				break;
3622 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3623 				return -ECONNREFUSED;
3624 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3625 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3626 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3627 			chan->remote_tx_win = val;
3628 			break;
3629 
3630 		default:
3631 			if (hint)
3632 				break;
3633 			result = L2CAP_CONF_UNKNOWN;
3634 			*((u8 *) ptr++) = type;
3635 			break;
3636 		}
3637 	}
3638 
3639 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3640 		goto done;
3641 
3642 	switch (chan->mode) {
3643 	case L2CAP_MODE_STREAMING:
3644 	case L2CAP_MODE_ERTM:
3645 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3646 			chan->mode = l2cap_select_mode(rfc.mode,
3647 						       chan->conn->feat_mask);
3648 			break;
3649 		}
3650 
3651 		if (remote_efs) {
3652 			if (__l2cap_efs_supported(chan->conn))
3653 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3654 			else
3655 				return -ECONNREFUSED;
3656 		}
3657 
3658 		if (chan->mode != rfc.mode)
3659 			return -ECONNREFUSED;
3660 
3661 		break;
3662 	}
3663 
3664 done:
3665 	if (chan->mode != rfc.mode) {
3666 		result = L2CAP_CONF_UNACCEPT;
3667 		rfc.mode = chan->mode;
3668 
3669 		if (chan->num_conf_rsp == 1)
3670 			return -ECONNREFUSED;
3671 
3672 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3673 				   (unsigned long) &rfc, endptr - ptr);
3674 	}
3675 
3676 	if (result == L2CAP_CONF_SUCCESS) {
3677 		/* Configure output options and let the other side know
3678 		 * which ones we don't like. */
3679 
3680 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3681 			result = L2CAP_CONF_UNACCEPT;
3682 		else {
3683 			chan->omtu = mtu;
3684 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3685 		}
3686 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3687 
3688 		if (remote_efs) {
3689 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3690 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3691 			    efs.stype != chan->local_stype) {
3692 
3693 				result = L2CAP_CONF_UNACCEPT;
3694 
3695 				if (chan->num_conf_req >= 1)
3696 					return -ECONNREFUSED;
3697 
3698 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3699 						   sizeof(efs),
3700 						   (unsigned long) &efs, endptr - ptr);
3701 			} else {
3702 				/* Send PENDING Conf Rsp */
3703 				result = L2CAP_CONF_PENDING;
3704 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3705 			}
3706 		}
3707 
3708 		switch (rfc.mode) {
3709 		case L2CAP_MODE_BASIC:
3710 			chan->fcs = L2CAP_FCS_NONE;
3711 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3712 			break;
3713 
3714 		case L2CAP_MODE_ERTM:
3715 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3716 				chan->remote_tx_win = rfc.txwin_size;
3717 			else
3718 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3719 
3720 			chan->remote_max_tx = rfc.max_transmit;
3721 
3722 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3723 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3724 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3725 			rfc.max_pdu_size = cpu_to_le16(size);
3726 			chan->remote_mps = size;
3727 
3728 			__l2cap_set_ertm_timeouts(chan, &rfc);
3729 
3730 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3731 
3732 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3733 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3734 
3735 			if (remote_efs &&
3736 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3737 				chan->remote_id = efs.id;
3738 				chan->remote_stype = efs.stype;
3739 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3740 				chan->remote_flush_to =
3741 					le32_to_cpu(efs.flush_to);
3742 				chan->remote_acc_lat =
3743 					le32_to_cpu(efs.acc_lat);
3744 				chan->remote_sdu_itime =
3745 					le32_to_cpu(efs.sdu_itime);
3746 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3747 						   sizeof(efs),
3748 						   (unsigned long) &efs, endptr - ptr);
3749 			}
3750 			break;
3751 
3752 		case L2CAP_MODE_STREAMING:
3753 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3754 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3755 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3756 			rfc.max_pdu_size = cpu_to_le16(size);
3757 			chan->remote_mps = size;
3758 
3759 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3760 
3761 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3762 					   (unsigned long) &rfc, endptr - ptr);
3763 
3764 			break;
3765 
3766 		default:
3767 			result = L2CAP_CONF_UNACCEPT;
3768 
3769 			memset(&rfc, 0, sizeof(rfc));
3770 			rfc.mode = chan->mode;
3771 		}
3772 
3773 		if (result == L2CAP_CONF_SUCCESS)
3774 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3775 	}
3776 	rsp->scid   = cpu_to_le16(chan->dcid);
3777 	rsp->result = cpu_to_le16(result);
3778 	rsp->flags  = cpu_to_le16(0);
3779 
3780 	return ptr - data;
3781 }
3782 
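/* Parse a Configuration Response from the remote and build the follow-up
 * Configuration Request in @data, adopting the MTU, flush timeout, RFC,
 * extended window and EFS values the peer reported.  Returns the length
 * of the request, or -ECONNREFUSED if the response cannot be accepted.
 */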
3783 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3784 				void *data, size_t size, u16 *result)
3785 {
3786 	struct l2cap_conf_req *req = data;
3787 	void *ptr = req->data;
3788 	void *endptr = data + size;
3789 	int type, olen;
3790 	unsigned long val;
3791 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3792 	struct l2cap_conf_efs efs;
3793 
3794 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3795 
3796 	while (len >= L2CAP_CONF_OPT_SIZE) {
3797 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3798 		if (len < 0)
3799 			break;
3800 
3801 		switch (type) {
3802 		case L2CAP_CONF_MTU:
3803 			if (olen != 2)
3804 				break;
3805 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3806 				*result = L2CAP_CONF_UNACCEPT;
3807 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3808 			} else
3809 				chan->imtu = val;
3810 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3811 					   endptr - ptr);
3812 			break;
3813 
3814 		case L2CAP_CONF_FLUSH_TO:
3815 			if (olen != 2)
3816 				break;
3817 			chan->flush_to = val;
3818 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3819 					   chan->flush_to, endptr - ptr);
3820 			break;
3821 
3822 		case L2CAP_CONF_RFC:
3823 			if (olen != sizeof(rfc))
3824 				break;
3825 			memcpy(&rfc, (void *)val, olen);
3826 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3827 			    rfc.mode != chan->mode)
3828 				return -ECONNREFUSED;
3829 			chan->fcs = 0;
3830 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3831 					   (unsigned long) &rfc, endptr - ptr);
3832 			break;
3833 
3834 		case L2CAP_CONF_EWS:
3835 			if (olen != 2)
3836 				break;
3837 			chan->ack_win = min_t(u16, val, chan->ack_win);
3838 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3839 					   chan->tx_win, endptr - ptr);
3840 			break;
3841 
3842 		case L2CAP_CONF_EFS:
3843 			if (olen != sizeof(efs))
3844 				break;
3845 			memcpy(&efs, (void *)val, olen);
3846 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3847 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3848 			    efs.stype != chan->local_stype)
3849 				return -ECONNREFUSED;
3850 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3851 					   (unsigned long) &efs, endptr - ptr);
3852 			break;
3853 
3854 		case L2CAP_CONF_FCS:
3855 			if (olen != 1)
3856 				break;
3857 			if (*result == L2CAP_CONF_PENDING)
3858 				if (val == L2CAP_FCS_NONE)
3859 					set_bit(CONF_RECV_NO_FCS,
3860 						&chan->conf_state);
3861 			break;
3862 		}
3863 	}
3864 
3865 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3866 		return -ECONNREFUSED;
3867 
3868 	chan->mode = rfc.mode;
3869 
3870 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3871 		switch (rfc.mode) {
3872 		case L2CAP_MODE_ERTM:
3873 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3874 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3875 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3876 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3877 				chan->ack_win = min_t(u16, chan->ack_win,
3878 						      rfc.txwin_size);
3879 
3880 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3881 				chan->local_msdu = le16_to_cpu(efs.msdu);
3882 				chan->local_sdu_itime =
3883 					le32_to_cpu(efs.sdu_itime);
3884 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3885 				chan->local_flush_to =
3886 					le32_to_cpu(efs.flush_to);
3887 			}
3888 			break;
3889 
3890 		case L2CAP_MODE_STREAMING:
3891 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3892 		}
3893 	}
3894 
3895 	req->dcid   = cpu_to_le16(chan->dcid);
3896 	req->flags  = cpu_to_le16(0);
3897 
3898 	return ptr - data;
3899 }
3900 
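/* Build a Configuration Response carrying only the given result and flags
 * (no options) and return its length.
 */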
3901 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3902 				u16 result, u16 flags)
3903 {
3904 	struct l2cap_conf_rsp *rsp = data;
3905 	void *ptr = rsp->data;
3906 
3907 	BT_DBG("chan %p", chan);
3908 
3909 	rsp->scid   = cpu_to_le16(chan->dcid);
3910 	rsp->result = cpu_to_le16(result);
3911 	rsp->flags  = cpu_to_le16(flags);
3912 
3913 	return ptr - data;
3914 }
3915 
3916 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3917 {
3918 	struct l2cap_le_conn_rsp rsp;
3919 	struct l2cap_conn *conn = chan->conn;
3920 
3921 	BT_DBG("chan %p", chan);
3922 
3923 	rsp.dcid    = cpu_to_le16(chan->scid);
3924 	rsp.mtu     = cpu_to_le16(chan->imtu);
3925 	rsp.mps     = cpu_to_le16(chan->mps);
3926 	rsp.credits = cpu_to_le16(chan->rx_credits);
3927 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3928 
3929 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3930 		       &rsp);
3931 }
3932 
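/* Send the deferred Enhanced Credit Based Connection Response.  All channels
 * on this connection that still carry the same pending ident are collected
 * into a single response, and their ident is cleared so the response is only
 * sent once.
 */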
3933 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3934 {
3935 	struct {
3936 		struct l2cap_ecred_conn_rsp rsp;
3937 		__le16 dcid[5];
3938 	} __packed pdu;
3939 	struct l2cap_conn *conn = chan->conn;
3940 	u16 ident = chan->ident;
3941 	int i = 0;
3942 
3943 	if (!ident)
3944 		return;
3945 
3946 	BT_DBG("chan %p ident %d", chan, ident);
3947 
3948 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3949 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3950 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3951 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3952 
3953 	mutex_lock(&conn->chan_lock);
3954 
3955 	list_for_each_entry(chan, &conn->chan_l, list) {
3956 		if (chan->ident != ident)
3957 			continue;
3958 
3959 		/* Reset ident so only one response is sent */
3960 		chan->ident = 0;
3961 
3962 		/* Include all channels pending with the same ident */
3963 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3964 	}
3965 
3966 	mutex_unlock(&conn->chan_lock);
3967 
3968 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3969 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3970 }
3971 
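/* Send the deferred Connection Response (or Create Channel Response when the
 * channel runs over a high-speed link) and, if not already done, start
 * configuration by sending the first Configuration Request.
 */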
3972 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3973 {
3974 	struct l2cap_conn_rsp rsp;
3975 	struct l2cap_conn *conn = chan->conn;
3976 	u8 buf[128];
3977 	u8 rsp_code;
3978 
3979 	rsp.scid   = cpu_to_le16(chan->dcid);
3980 	rsp.dcid   = cpu_to_le16(chan->scid);
3981 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3982 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3983 
3984 	if (chan->hs_hcon)
3985 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3986 	else
3987 		rsp_code = L2CAP_CONN_RSP;
3988 
3989 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3990 
3991 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3992 
3993 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3994 		return;
3995 
3996 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3997 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3998 	chan->num_conf_req++;
3999 }
4000 
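/* Extract the RFC and extended window size options from a successful
 * Configuration Response so the ERTM/streaming parameters (timeouts, MPS,
 * ack window) match what the remote actually accepted.
 */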
4001 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4002 {
4003 	int type, olen;
4004 	unsigned long val;
4005 	/* Use sane default values in case a misbehaving remote device
4006 	 * did not send an RFC or extended window size option.
4007 	 */
4008 	u16 txwin_ext = chan->ack_win;
4009 	struct l2cap_conf_rfc rfc = {
4010 		.mode = chan->mode,
4011 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4012 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4013 		.max_pdu_size = cpu_to_le16(chan->imtu),
4014 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4015 	};
4016 
4017 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4018 
4019 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4020 		return;
4021 
4022 	while (len >= L2CAP_CONF_OPT_SIZE) {
4023 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4024 		if (len < 0)
4025 			break;
4026 
4027 		switch (type) {
4028 		case L2CAP_CONF_RFC:
4029 			if (olen != sizeof(rfc))
4030 				break;
4031 			memcpy(&rfc, (void *)val, olen);
4032 			break;
4033 		case L2CAP_CONF_EWS:
4034 			if (olen != 2)
4035 				break;
4036 			txwin_ext = val;
4037 			break;
4038 		}
4039 	}
4040 
4041 	switch (rfc.mode) {
4042 	case L2CAP_MODE_ERTM:
4043 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4044 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4045 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4046 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4047 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4048 		else
4049 			chan->ack_win = min_t(u16, chan->ack_win,
4050 					      rfc.txwin_size);
4051 		break;
4052 	case L2CAP_MODE_STREAMING:
4053 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4054 	}
4055 }
4056 
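/* Handle an incoming Command Reject.  If it rejects our pending Information
 * Request, mark the feature mask exchange as done and let waiting channels
 * proceed.
 */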
4057 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4058 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4059 				    u8 *data)
4060 {
4061 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4062 
4063 	if (cmd_len < sizeof(*rej))
4064 		return -EPROTO;
4065 
4066 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4067 		return 0;
4068 
4069 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4070 	    cmd->ident == conn->info_ident) {
4071 		cancel_delayed_work(&conn->info_timer);
4072 
4073 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4074 		conn->info_ident = 0;
4075 
4076 		l2cap_conn_start(conn);
4077 	}
4078 
4079 	return 0;
4080 }
4081 
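/* Handle an incoming Connection Request (also used for Create Channel
 * Requests): look up a listening channel for the PSM, run security and CID
 * checks, create the new channel and send the response.  May also kick off
 * the information exchange or the first Configuration Request.
 */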
4082 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4083 					struct l2cap_cmd_hdr *cmd,
4084 					u8 *data, u8 rsp_code, u8 amp_id)
4085 {
4086 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4087 	struct l2cap_conn_rsp rsp;
4088 	struct l2cap_chan *chan = NULL, *pchan;
4089 	int result, status = L2CAP_CS_NO_INFO;
4090 
4091 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4092 	__le16 psm = req->psm;
4093 
4094 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4095 
4096 	/* Check if we have a socket listening on this PSM */
4097 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4098 					 &conn->hcon->dst, ACL_LINK);
4099 	if (!pchan) {
4100 		result = L2CAP_CR_BAD_PSM;
4101 		goto sendresp;
4102 	}
4103 
4104 	mutex_lock(&conn->chan_lock);
4105 	l2cap_chan_lock(pchan);
4106 
4107 	/* Check if the ACL is secure enough (if not SDP) */
4108 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4109 	    !hci_conn_check_link_mode(conn->hcon)) {
4110 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4111 		result = L2CAP_CR_SEC_BLOCK;
4112 		goto response;
4113 	}
4114 
4115 	result = L2CAP_CR_NO_MEM;
4116 
4117 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4118 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4119 		result = L2CAP_CR_INVALID_SCID;
4120 		goto response;
4121 	}
4122 
4123 	/* Check if we already have a channel with that dcid */
4124 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4125 		result = L2CAP_CR_SCID_IN_USE;
4126 		goto response;
4127 	}
4128 
4129 	chan = pchan->ops->new_connection(pchan);
4130 	if (!chan)
4131 		goto response;
4132 
4133 	/* For certain devices (ex: HID mouse), support for authentication,
4134 	 * pairing and bonding is optional. For such devices, in order to avoid
4135 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4136 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4137 	 */
4138 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4139 
4140 	bacpy(&chan->src, &conn->hcon->src);
4141 	bacpy(&chan->dst, &conn->hcon->dst);
4142 	chan->src_type = bdaddr_src_type(conn->hcon);
4143 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4144 	chan->psm  = psm;
4145 	chan->dcid = scid;
4146 	chan->local_amp_id = amp_id;
4147 
4148 	__l2cap_chan_add(conn, chan);
4149 
4150 	dcid = chan->scid;
4151 
4152 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4153 
4154 	chan->ident = cmd->ident;
4155 
4156 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4157 		if (l2cap_chan_check_security(chan, false)) {
4158 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4159 				l2cap_state_change(chan, BT_CONNECT2);
4160 				result = L2CAP_CR_PEND;
4161 				status = L2CAP_CS_AUTHOR_PEND;
4162 				chan->ops->defer(chan);
4163 			} else {
4164 				/* Force pending result for AMP controllers.
4165 				 * The connection will succeed after the
4166 				 * physical link is up.
4167 				 */
4168 				if (amp_id == AMP_ID_BREDR) {
4169 					l2cap_state_change(chan, BT_CONFIG);
4170 					result = L2CAP_CR_SUCCESS;
4171 				} else {
4172 					l2cap_state_change(chan, BT_CONNECT2);
4173 					result = L2CAP_CR_PEND;
4174 				}
4175 				status = L2CAP_CS_NO_INFO;
4176 			}
4177 		} else {
4178 			l2cap_state_change(chan, BT_CONNECT2);
4179 			result = L2CAP_CR_PEND;
4180 			status = L2CAP_CS_AUTHEN_PEND;
4181 		}
4182 	} else {
4183 		l2cap_state_change(chan, BT_CONNECT2);
4184 		result = L2CAP_CR_PEND;
4185 		status = L2CAP_CS_NO_INFO;
4186 	}
4187 
4188 response:
4189 	l2cap_chan_unlock(pchan);
4190 	mutex_unlock(&conn->chan_lock);
4191 	l2cap_chan_put(pchan);
4192 
4193 sendresp:
4194 	rsp.scid   = cpu_to_le16(scid);
4195 	rsp.dcid   = cpu_to_le16(dcid);
4196 	rsp.result = cpu_to_le16(result);
4197 	rsp.status = cpu_to_le16(status);
4198 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4199 
4200 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4201 		struct l2cap_info_req info;
4202 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4203 
4204 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4205 		conn->info_ident = l2cap_get_ident(conn);
4206 
4207 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4208 
4209 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4210 			       sizeof(info), &info);
4211 	}
4212 
4213 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4214 	    result == L2CAP_CR_SUCCESS) {
4215 		u8 buf[128];
4216 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4217 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4218 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4219 		chan->num_conf_req++;
4220 	}
4221 
4222 	return chan;
4223 }
4224 
4225 static int l2cap_connect_req(struct l2cap_conn *conn,
4226 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4227 {
4228 	struct hci_dev *hdev = conn->hcon->hdev;
4229 	struct hci_conn *hcon = conn->hcon;
4230 
4231 	if (cmd_len < sizeof(struct l2cap_conn_req))
4232 		return -EPROTO;
4233 
4234 	hci_dev_lock(hdev);
4235 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4236 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4237 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4238 	hci_dev_unlock(hdev);
4239 
4240 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4241 	return 0;
4242 }
4243 
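/* Handle a Connection Response or Create Channel Response.  On success the
 * channel moves to BT_CONFIG and the first Configuration Request is sent;
 * a pending result only flags the channel; anything else tears it down.
 */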
4244 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4245 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4246 				    u8 *data)
4247 {
4248 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4249 	u16 scid, dcid, result, status;
4250 	struct l2cap_chan *chan;
4251 	u8 req[128];
4252 	int err;
4253 
4254 	if (cmd_len < sizeof(*rsp))
4255 		return -EPROTO;
4256 
4257 	scid   = __le16_to_cpu(rsp->scid);
4258 	dcid   = __le16_to_cpu(rsp->dcid);
4259 	result = __le16_to_cpu(rsp->result);
4260 	status = __le16_to_cpu(rsp->status);
4261 
4262 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4263 	       dcid, scid, result, status);
4264 
4265 	mutex_lock(&conn->chan_lock);
4266 
4267 	if (scid) {
4268 		chan = __l2cap_get_chan_by_scid(conn, scid);
4269 		if (!chan) {
4270 			err = -EBADSLT;
4271 			goto unlock;
4272 		}
4273 	} else {
4274 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4275 		if (!chan) {
4276 			err = -EBADSLT;
4277 			goto unlock;
4278 		}
4279 	}
4280 
4281 	err = 0;
4282 
4283 	l2cap_chan_lock(chan);
4284 
4285 	switch (result) {
4286 	case L2CAP_CR_SUCCESS:
4287 		l2cap_state_change(chan, BT_CONFIG);
4288 		chan->ident = 0;
4289 		chan->dcid = dcid;
4290 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4291 
4292 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4293 			break;
4294 
4295 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4296 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4297 		chan->num_conf_req++;
4298 		break;
4299 
4300 	case L2CAP_CR_PEND:
4301 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4302 		break;
4303 
4304 	default:
4305 		l2cap_chan_del(chan, ECONNREFUSED);
4306 		break;
4307 	}
4308 
4309 	l2cap_chan_unlock(chan);
4310 
4311 unlock:
4312 	mutex_unlock(&conn->chan_lock);
4313 
4314 	return err;
4315 }
4316 
4317 static inline void set_default_fcs(struct l2cap_chan *chan)
4318 {
4319 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4320 	 * sides request it.
4321 	 */
4322 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4323 		chan->fcs = L2CAP_FCS_NONE;
4324 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4325 		chan->fcs = L2CAP_FCS_CRC16;
4326 }
4327 
4328 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4329 				    u8 ident, u16 flags)
4330 {
4331 	struct l2cap_conn *conn = chan->conn;
4332 
4333 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4334 	       flags);
4335 
4336 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4337 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4338 
4339 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4340 		       l2cap_build_conf_rsp(chan, data,
4341 					    L2CAP_CONF_SUCCESS, flags), data);
4342 }
4343 
4344 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4345 				   u16 scid, u16 dcid)
4346 {
4347 	struct l2cap_cmd_rej_cid rej;
4348 
4349 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4350 	rej.scid = __cpu_to_le16(scid);
4351 	rej.dcid = __cpu_to_le16(dcid);
4352 
4353 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4354 }
4355 
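/* Handle an incoming Configuration Request.  Fragments are accumulated in
 * chan->conf_req; once the request is complete it is parsed, a Configuration
 * Response is sent, and channel setup is finished when both directions are
 * configured.
 */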
4356 static inline int l2cap_config_req(struct l2cap_conn *conn,
4357 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4358 				   u8 *data)
4359 {
4360 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4361 	u16 dcid, flags;
4362 	u8 rsp[64];
4363 	struct l2cap_chan *chan;
4364 	int len, err = 0;
4365 
4366 	if (cmd_len < sizeof(*req))
4367 		return -EPROTO;
4368 
4369 	dcid  = __le16_to_cpu(req->dcid);
4370 	flags = __le16_to_cpu(req->flags);
4371 
4372 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4373 
4374 	chan = l2cap_get_chan_by_scid(conn, dcid);
4375 	if (!chan) {
4376 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4377 		return 0;
4378 	}
4379 
4380 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4381 	    chan->state != BT_CONNECTED) {
4382 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4383 				       chan->dcid);
4384 		goto unlock;
4385 	}
4386 
4387 	/* Reject if config buffer is too small. */
4388 	len = cmd_len - sizeof(*req);
4389 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4390 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4391 			       l2cap_build_conf_rsp(chan, rsp,
4392 			       L2CAP_CONF_REJECT, flags), rsp);
4393 		goto unlock;
4394 	}
4395 
4396 	/* Store config. */
4397 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4398 	chan->conf_len += len;
4399 
4400 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4401 		/* Incomplete config. Send empty response. */
4402 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4403 			       l2cap_build_conf_rsp(chan, rsp,
4404 			       L2CAP_CONF_SUCCESS, flags), rsp);
4405 		goto unlock;
4406 	}
4407 
4408 	/* Complete config. */
4409 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4410 	if (len < 0) {
4411 		l2cap_send_disconn_req(chan, ECONNRESET);
4412 		goto unlock;
4413 	}
4414 
4415 	chan->ident = cmd->ident;
4416 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4417 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4418 		chan->num_conf_rsp++;
4419 
4420 	/* Reset config buffer. */
4421 	chan->conf_len = 0;
4422 
4423 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4424 		goto unlock;
4425 
4426 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4427 		set_default_fcs(chan);
4428 
4429 		if (chan->mode == L2CAP_MODE_ERTM ||
4430 		    chan->mode == L2CAP_MODE_STREAMING)
4431 			err = l2cap_ertm_init(chan);
4432 
4433 		if (err < 0)
4434 			l2cap_send_disconn_req(chan, -err);
4435 		else
4436 			l2cap_chan_ready(chan);
4437 
4438 		goto unlock;
4439 	}
4440 
4441 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4442 		u8 buf[64];
4443 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4444 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4445 		chan->num_conf_req++;
4446 	}
4447 
4448 	/* We got a Conf Rsp PENDING from the remote side and assume we sent
4449 	 * a Conf Rsp PENDING in the code above. */
4450 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4451 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4452 
4453 		/* check compatibility */
4454 
4455 		/* Send rsp for BR/EDR channel */
4456 		if (!chan->hs_hcon)
4457 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4458 		else
4459 			chan->ident = cmd->ident;
4460 	}
4461 
4462 unlock:
4463 	l2cap_chan_unlock(chan);
4464 	return err;
4465 }
4466 
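/* Handle an incoming Configuration Response.  Success records the accepted
 * options, "unaccepted" triggers a retry with adjusted parameters, and other
 * results disconnect the channel.  Once no continuation flag remains, input
 * configuration is marked done and the channel may become ready.
 */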
4467 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4468 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4469 				   u8 *data)
4470 {
4471 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4472 	u16 scid, flags, result;
4473 	struct l2cap_chan *chan;
4474 	int len = cmd_len - sizeof(*rsp);
4475 	int err = 0;
4476 
4477 	if (cmd_len < sizeof(*rsp))
4478 		return -EPROTO;
4479 
4480 	scid   = __le16_to_cpu(rsp->scid);
4481 	flags  = __le16_to_cpu(rsp->flags);
4482 	result = __le16_to_cpu(rsp->result);
4483 
4484 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4485 	       result, len);
4486 
4487 	chan = l2cap_get_chan_by_scid(conn, scid);
4488 	if (!chan)
4489 		return 0;
4490 
4491 	switch (result) {
4492 	case L2CAP_CONF_SUCCESS:
4493 		l2cap_conf_rfc_get(chan, rsp->data, len);
4494 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4495 		break;
4496 
4497 	case L2CAP_CONF_PENDING:
4498 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4499 
4500 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4501 			char buf[64];
4502 
4503 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4504 						   buf, sizeof(buf), &result);
4505 			if (len < 0) {
4506 				l2cap_send_disconn_req(chan, ECONNRESET);
4507 				goto done;
4508 			}
4509 
4510 			if (!chan->hs_hcon) {
4511 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4512 							0);
4513 			} else {
4514 				if (l2cap_check_efs(chan)) {
4515 					amp_create_logical_link(chan);
4516 					chan->ident = cmd->ident;
4517 				}
4518 			}
4519 		}
4520 		goto done;
4521 
4522 	case L2CAP_CONF_UNACCEPT:
4523 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4524 			char req[64];
4525 
4526 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4527 				l2cap_send_disconn_req(chan, ECONNRESET);
4528 				goto done;
4529 			}
4530 
4531 			/* throw out any old stored conf requests */
4532 			result = L2CAP_CONF_SUCCESS;
4533 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4534 						   req, sizeof(req), &result);
4535 			if (len < 0) {
4536 				l2cap_send_disconn_req(chan, ECONNRESET);
4537 				goto done;
4538 			}
4539 
4540 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4541 				       L2CAP_CONF_REQ, len, req);
4542 			chan->num_conf_req++;
4543 			if (result != L2CAP_CONF_SUCCESS)
4544 				goto done;
4545 			break;
4546 		}
4547 		fallthrough;
4548 
4549 	default:
4550 		l2cap_chan_set_err(chan, ECONNRESET);
4551 
4552 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4553 		l2cap_send_disconn_req(chan, ECONNRESET);
4554 		goto done;
4555 	}
4556 
4557 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4558 		goto done;
4559 
4560 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4561 
4562 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4563 		set_default_fcs(chan);
4564 
4565 		if (chan->mode == L2CAP_MODE_ERTM ||
4566 		    chan->mode == L2CAP_MODE_STREAMING)
4567 			err = l2cap_ertm_init(chan);
4568 
4569 		if (err < 0)
4570 			l2cap_send_disconn_req(chan, -err);
4571 		else
4572 			l2cap_chan_ready(chan);
4573 	}
4574 
4575 done:
4576 	l2cap_chan_unlock(chan);
4577 	return err;
4578 }
4579 
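/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.
 */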
4580 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4581 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4582 				       u8 *data)
4583 {
4584 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4585 	struct l2cap_disconn_rsp rsp;
4586 	u16 dcid, scid;
4587 	struct l2cap_chan *chan;
4588 
4589 	if (cmd_len != sizeof(*req))
4590 		return -EPROTO;
4591 
4592 	scid = __le16_to_cpu(req->scid);
4593 	dcid = __le16_to_cpu(req->dcid);
4594 
4595 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4596 
4597 	mutex_lock(&conn->chan_lock);
4598 
4599 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4600 	if (!chan) {
4601 		mutex_unlock(&conn->chan_lock);
4602 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4603 		return 0;
4604 	}
4605 
4606 	l2cap_chan_hold(chan);
4607 	l2cap_chan_lock(chan);
4608 
4609 	rsp.dcid = cpu_to_le16(chan->scid);
4610 	rsp.scid = cpu_to_le16(chan->dcid);
4611 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4612 
4613 	chan->ops->set_shutdown(chan);
4614 
4615 	l2cap_chan_del(chan, ECONNRESET);
4616 
4617 	chan->ops->close(chan);
4618 
4619 	l2cap_chan_unlock(chan);
4620 	l2cap_chan_put(chan);
4621 
4622 	mutex_unlock(&conn->chan_lock);
4623 
4624 	return 0;
4625 }
4626 
4627 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4628 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4629 				       u8 *data)
4630 {
4631 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4632 	u16 dcid, scid;
4633 	struct l2cap_chan *chan;
4634 
4635 	if (cmd_len != sizeof(*rsp))
4636 		return -EPROTO;
4637 
4638 	scid = __le16_to_cpu(rsp->scid);
4639 	dcid = __le16_to_cpu(rsp->dcid);
4640 
4641 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4642 
4643 	mutex_lock(&conn->chan_lock);
4644 
4645 	chan = __l2cap_get_chan_by_scid(conn, scid);
4646 	if (!chan) {
4647 		mutex_unlock(&conn->chan_lock);
4648 		return 0;
4649 	}
4650 
4651 	l2cap_chan_hold(chan);
4652 	l2cap_chan_lock(chan);
4653 
4654 	if (chan->state != BT_DISCONN) {
4655 		l2cap_chan_unlock(chan);
4656 		l2cap_chan_put(chan);
4657 		mutex_unlock(&conn->chan_lock);
4658 		return 0;
4659 	}
4660 
4661 	l2cap_chan_del(chan, 0);
4662 
4663 	chan->ops->close(chan);
4664 
4665 	l2cap_chan_unlock(chan);
4666 	l2cap_chan_put(chan);
4667 
4668 	mutex_unlock(&conn->chan_lock);
4669 
4670 	return 0;
4671 }
4672 
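/* Handle an Information Request, replying with the local feature mask or
 * fixed channel map, or "not supported" for unknown types.
 */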
4673 static inline int l2cap_information_req(struct l2cap_conn *conn,
4674 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4675 					u8 *data)
4676 {
4677 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4678 	u16 type;
4679 
4680 	if (cmd_len != sizeof(*req))
4681 		return -EPROTO;
4682 
4683 	type = __le16_to_cpu(req->type);
4684 
4685 	BT_DBG("type 0x%4.4x", type);
4686 
4687 	if (type == L2CAP_IT_FEAT_MASK) {
4688 		u8 buf[8];
4689 		u32 feat_mask = l2cap_feat_mask;
4690 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4691 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4692 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4693 		if (!disable_ertm)
4694 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4695 				| L2CAP_FEAT_FCS;
4696 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4697 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4698 				| L2CAP_FEAT_EXT_WINDOW;
4699 
4700 		put_unaligned_le32(feat_mask, rsp->data);
4701 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4702 			       buf);
4703 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4704 		u8 buf[12];
4705 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4706 
4707 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4708 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4709 		rsp->data[0] = conn->local_fixed_chan;
4710 		memset(rsp->data + 1, 0, 7);
4711 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4712 			       buf);
4713 	} else {
4714 		struct l2cap_info_rsp rsp;
4715 		rsp.type   = cpu_to_le16(type);
4716 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4717 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4718 			       &rsp);
4719 	}
4720 
4721 	return 0;
4722 }
4723 
4724 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4725 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4726 					u8 *data)
4727 {
4728 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4729 	u16 type, result;
4730 
4731 	if (cmd_len < sizeof(*rsp))
4732 		return -EPROTO;
4733 
4734 	type   = __le16_to_cpu(rsp->type);
4735 	result = __le16_to_cpu(rsp->result);
4736 
4737 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4738 
4739 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4740 	if (cmd->ident != conn->info_ident ||
4741 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4742 		return 0;
4743 
4744 	cancel_delayed_work(&conn->info_timer);
4745 
4746 	if (result != L2CAP_IR_SUCCESS) {
4747 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4748 		conn->info_ident = 0;
4749 
4750 		l2cap_conn_start(conn);
4751 
4752 		return 0;
4753 	}
4754 
4755 	switch (type) {
4756 	case L2CAP_IT_FEAT_MASK:
4757 		conn->feat_mask = get_unaligned_le32(rsp->data);
4758 
4759 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4760 			struct l2cap_info_req req;
4761 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4762 
4763 			conn->info_ident = l2cap_get_ident(conn);
4764 
4765 			l2cap_send_cmd(conn, conn->info_ident,
4766 				       L2CAP_INFO_REQ, sizeof(req), &req);
4767 		} else {
4768 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4769 			conn->info_ident = 0;
4770 
4771 			l2cap_conn_start(conn);
4772 		}
4773 		break;
4774 
4775 	case L2CAP_IT_FIXED_CHAN:
4776 		conn->remote_fixed_chan = rsp->data[0];
4777 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4778 		conn->info_ident = 0;
4779 
4780 		l2cap_conn_start(conn);
4781 		break;
4782 	}
4783 
4784 	return 0;
4785 }
4786 
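/* Handle an AMP Create Channel Request.  Controller id 0 falls back to a
 * plain BR/EDR connection; otherwise the AMP controller id is validated and
 * the new channel is bound to the high-speed link.
 */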
4787 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4788 				    struct l2cap_cmd_hdr *cmd,
4789 				    u16 cmd_len, void *data)
4790 {
4791 	struct l2cap_create_chan_req *req = data;
4792 	struct l2cap_create_chan_rsp rsp;
4793 	struct l2cap_chan *chan;
4794 	struct hci_dev *hdev;
4795 	u16 psm, scid;
4796 
4797 	if (cmd_len != sizeof(*req))
4798 		return -EPROTO;
4799 
4800 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4801 		return -EINVAL;
4802 
4803 	psm = le16_to_cpu(req->psm);
4804 	scid = le16_to_cpu(req->scid);
4805 
4806 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4807 
4808 	/* For controller id 0 make BR/EDR connection */
4809 	if (req->amp_id == AMP_ID_BREDR) {
4810 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4811 			      req->amp_id);
4812 		return 0;
4813 	}
4814 
4815 	/* Validate AMP controller id */
4816 	hdev = hci_dev_get(req->amp_id);
4817 	if (!hdev)
4818 		goto error;
4819 
4820 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4821 		hci_dev_put(hdev);
4822 		goto error;
4823 	}
4824 
4825 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4826 			     req->amp_id);
4827 	if (chan) {
4828 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4829 		struct hci_conn *hs_hcon;
4830 
4831 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4832 						  &conn->hcon->dst);
4833 		if (!hs_hcon) {
4834 			hci_dev_put(hdev);
4835 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4836 					       chan->dcid);
4837 			return 0;
4838 		}
4839 
4840 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4841 
4842 		mgr->bredr_chan = chan;
4843 		chan->hs_hcon = hs_hcon;
4844 		chan->fcs = L2CAP_FCS_NONE;
4845 		conn->mtu = hdev->block_mtu;
4846 	}
4847 
4848 	hci_dev_put(hdev);
4849 
4850 	return 0;
4851 
4852 error:
4853 	rsp.dcid = 0;
4854 	rsp.scid = cpu_to_le16(scid);
4855 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4856 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4857 
4858 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4859 		       sizeof(rsp), &rsp);
4860 
4861 	return 0;
4862 }
4863 
4864 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4865 {
4866 	struct l2cap_move_chan_req req;
4867 	u8 ident;
4868 
4869 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4870 
4871 	ident = l2cap_get_ident(chan->conn);
4872 	chan->ident = ident;
4873 
4874 	req.icid = cpu_to_le16(chan->scid);
4875 	req.dest_amp_id = dest_amp_id;
4876 
4877 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4878 		       &req);
4879 
4880 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4881 }
4882 
4883 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4884 {
4885 	struct l2cap_move_chan_rsp rsp;
4886 
4887 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4888 
4889 	rsp.icid = cpu_to_le16(chan->dcid);
4890 	rsp.result = cpu_to_le16(result);
4891 
4892 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4893 		       sizeof(rsp), &rsp);
4894 }
4895 
4896 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4897 {
4898 	struct l2cap_move_chan_cfm cfm;
4899 
4900 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4901 
4902 	chan->ident = l2cap_get_ident(chan->conn);
4903 
4904 	cfm.icid = cpu_to_le16(chan->scid);
4905 	cfm.result = cpu_to_le16(result);
4906 
4907 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4908 		       sizeof(cfm), &cfm);
4909 
4910 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4911 }
4912 
4913 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4914 {
4915 	struct l2cap_move_chan_cfm cfm;
4916 
4917 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4918 
4919 	cfm.icid = cpu_to_le16(icid);
4920 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4921 
4922 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4923 		       sizeof(cfm), &cfm);
4924 }
4925 
4926 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4927 					 u16 icid)
4928 {
4929 	struct l2cap_move_chan_cfm_rsp rsp;
4930 
4931 	BT_DBG("icid 0x%4.4x", icid);
4932 
4933 	rsp.icid = cpu_to_le16(icid);
4934 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4935 }
4936 
4937 static void __release_logical_link(struct l2cap_chan *chan)
4938 {
4939 	chan->hs_hchan = NULL;
4940 	chan->hs_hcon = NULL;
4941 
4942 	/* Placeholder - release the logical link */
4943 }
4944 
4945 static void l2cap_logical_fail(struct l2cap_chan *chan)
4946 {
4947 	/* Logical link setup failed */
4948 	if (chan->state != BT_CONNECTED) {
4949 		/* Create channel failure, disconnect */
4950 		l2cap_send_disconn_req(chan, ECONNRESET);
4951 		return;
4952 	}
4953 
4954 	switch (chan->move_role) {
4955 	case L2CAP_MOVE_ROLE_RESPONDER:
4956 		l2cap_move_done(chan);
4957 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4958 		break;
4959 	case L2CAP_MOVE_ROLE_INITIATOR:
4960 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4961 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4962 			/* Remote has only sent pending or
4963 			 * success responses, clean up
4964 			 */
4965 			l2cap_move_done(chan);
4966 		}
4967 
4968 		/* Other amp move states imply that the move
4969 		 * has already aborted
4970 		 */
4971 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4972 		break;
4973 	}
4974 }
4975 
4976 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4977 					struct hci_chan *hchan)
4978 {
4979 	struct l2cap_conf_rsp rsp;
4980 
4981 	chan->hs_hchan = hchan;
4982 	chan->hs_hcon->l2cap_data = chan->conn;
4983 
4984 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4985 
4986 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4987 		int err;
4988 
4989 		set_default_fcs(chan);
4990 
4991 		err = l2cap_ertm_init(chan);
4992 		if (err < 0)
4993 			l2cap_send_disconn_req(chan, -err);
4994 		else
4995 			l2cap_chan_ready(chan);
4996 	}
4997 }
4998 
4999 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5000 				      struct hci_chan *hchan)
5001 {
5002 	chan->hs_hcon = hchan->conn;
5003 	chan->hs_hcon->l2cap_data = chan->conn;
5004 
5005 	BT_DBG("move_state %d", chan->move_state);
5006 
5007 	switch (chan->move_state) {
5008 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5009 		/* Move confirm will be sent after a success
5010 		 * response is received
5011 		 */
5012 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5013 		break;
5014 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5015 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5016 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5017 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5018 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5019 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5020 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5021 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5022 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5023 		}
5024 		break;
5025 	default:
5026 		/* Move was not in expected state, free the channel */
5027 		__release_logical_link(chan);
5028 
5029 		chan->move_state = L2CAP_MOVE_STABLE;
5030 	}
5031 }
5032 
5033 /* Call with chan locked */
5034 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5035 		       u8 status)
5036 {
5037 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5038 
5039 	if (status) {
5040 		l2cap_logical_fail(chan);
5041 		__release_logical_link(chan);
5042 		return;
5043 	}
5044 
5045 	if (chan->state != BT_CONNECTED) {
5046 		/* Ignore logical link if channel is on BR/EDR */
5047 		if (chan->local_amp_id != AMP_ID_BREDR)
5048 			l2cap_logical_finish_create(chan, hchan);
5049 	} else {
5050 		l2cap_logical_finish_move(chan, hchan);
5051 	}
5052 }
5053 
5054 void l2cap_move_start(struct l2cap_chan *chan)
5055 {
5056 	BT_DBG("chan %p", chan);
5057 
5058 	if (chan->local_amp_id == AMP_ID_BREDR) {
5059 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5060 			return;
5061 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5062 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5063 		/* Placeholder - start physical link setup */
5064 	} else {
5065 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5066 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5067 		chan->move_id = 0;
5068 		l2cap_move_setup(chan);
5069 		l2cap_send_move_chan_req(chan, 0);
5070 	}
5071 }
5072 
5073 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5074 			    u8 local_amp_id, u8 remote_amp_id)
5075 {
5076 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5077 	       local_amp_id, remote_amp_id);
5078 
5079 	chan->fcs = L2CAP_FCS_NONE;
5080 
5081 	/* Outgoing channel on AMP */
5082 	if (chan->state == BT_CONNECT) {
5083 		if (result == L2CAP_CR_SUCCESS) {
5084 			chan->local_amp_id = local_amp_id;
5085 			l2cap_send_create_chan_req(chan, remote_amp_id);
5086 		} else {
5087 			/* Revert to BR/EDR connect */
5088 			l2cap_send_conn_req(chan);
5089 		}
5090 
5091 		return;
5092 	}
5093 
5094 	/* Incoming channel on AMP */
5095 	if (__l2cap_no_conn_pending(chan)) {
5096 		struct l2cap_conn_rsp rsp;
5097 		char buf[128];
5098 		rsp.scid = cpu_to_le16(chan->dcid);
5099 		rsp.dcid = cpu_to_le16(chan->scid);
5100 
5101 		if (result == L2CAP_CR_SUCCESS) {
5102 			/* Send successful response */
5103 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5104 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5105 		} else {
5106 			/* Send negative response */
5107 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5108 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5109 		}
5110 
5111 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5112 			       sizeof(rsp), &rsp);
5113 
5114 		if (result == L2CAP_CR_SUCCESS) {
5115 			l2cap_state_change(chan, BT_CONFIG);
5116 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5117 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5118 				       L2CAP_CONF_REQ,
5119 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5120 			chan->num_conf_req++;
5121 		}
5122 	}
5123 }
5124 
5125 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5126 				   u8 remote_amp_id)
5127 {
5128 	l2cap_move_setup(chan);
5129 	chan->move_id = local_amp_id;
5130 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5131 
5132 	l2cap_send_move_chan_req(chan, remote_amp_id);
5133 }
5134 
5135 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5136 {
5137 	struct hci_chan *hchan = NULL;
5138 
5139 	/* Placeholder - get hci_chan for logical link */
5140 
5141 	if (hchan) {
5142 		if (hchan->state == BT_CONNECTED) {
5143 			/* Logical link is ready to go */
5144 			chan->hs_hcon = hchan->conn;
5145 			chan->hs_hcon->l2cap_data = chan->conn;
5146 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5147 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5148 
5149 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5150 		} else {
5151 			/* Wait for logical link to be ready */
5152 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5153 		}
5154 	} else {
5155 		/* Logical link not available */
5156 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5157 	}
5158 }
5159 
5160 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5161 {
5162 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5163 		u8 rsp_result;
5164 		if (result == -EINVAL)
5165 			rsp_result = L2CAP_MR_BAD_ID;
5166 		else
5167 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5168 
5169 		l2cap_send_move_chan_rsp(chan, rsp_result);
5170 	}
5171 
5172 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5173 	chan->move_state = L2CAP_MOVE_STABLE;
5174 
5175 	/* Restart data transmission */
5176 	l2cap_ertm_send(chan);
5177 }
5178 
5179 /* Invoke with locked chan */
5180 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5181 {
5182 	u8 local_amp_id = chan->local_amp_id;
5183 	u8 remote_amp_id = chan->remote_amp_id;
5184 
5185 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5186 	       chan, result, local_amp_id, remote_amp_id);
5187 
5188 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5189 		return;
5190 
5191 	if (chan->state != BT_CONNECTED) {
5192 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5193 	} else if (result != L2CAP_MR_SUCCESS) {
5194 		l2cap_do_move_cancel(chan, result);
5195 	} else {
5196 		switch (chan->move_role) {
5197 		case L2CAP_MOVE_ROLE_INITIATOR:
5198 			l2cap_do_move_initiate(chan, local_amp_id,
5199 					       remote_amp_id);
5200 			break;
5201 		case L2CAP_MOVE_ROLE_RESPONDER:
5202 			l2cap_do_move_respond(chan, result);
5203 			break;
5204 		default:
5205 			l2cap_do_move_cancel(chan, result);
5206 			break;
5207 		}
5208 	}
5209 }
5210 
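/* Handle an AMP Move Channel Request: validate the channel and destination
 * controller, detect move collisions, and reply with the matching Move
 * Channel Response.
 */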
5211 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5212 					 struct l2cap_cmd_hdr *cmd,
5213 					 u16 cmd_len, void *data)
5214 {
5215 	struct l2cap_move_chan_req *req = data;
5216 	struct l2cap_move_chan_rsp rsp;
5217 	struct l2cap_chan *chan;
5218 	u16 icid = 0;
5219 	u16 result = L2CAP_MR_NOT_ALLOWED;
5220 
5221 	if (cmd_len != sizeof(*req))
5222 		return -EPROTO;
5223 
5224 	icid = le16_to_cpu(req->icid);
5225 
5226 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5227 
5228 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5229 		return -EINVAL;
5230 
5231 	chan = l2cap_get_chan_by_dcid(conn, icid);
5232 	if (!chan) {
5233 		rsp.icid = cpu_to_le16(icid);
5234 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5235 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5236 			       sizeof(rsp), &rsp);
5237 		return 0;
5238 	}
5239 
5240 	chan->ident = cmd->ident;
5241 
5242 	if (chan->scid < L2CAP_CID_DYN_START ||
5243 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5244 	    (chan->mode != L2CAP_MODE_ERTM &&
5245 	     chan->mode != L2CAP_MODE_STREAMING)) {
5246 		result = L2CAP_MR_NOT_ALLOWED;
5247 		goto send_move_response;
5248 	}
5249 
5250 	if (chan->local_amp_id == req->dest_amp_id) {
5251 		result = L2CAP_MR_SAME_ID;
5252 		goto send_move_response;
5253 	}
5254 
5255 	if (req->dest_amp_id != AMP_ID_BREDR) {
5256 		struct hci_dev *hdev;
5257 		hdev = hci_dev_get(req->dest_amp_id);
5258 		if (!hdev || hdev->dev_type != HCI_AMP ||
5259 		    !test_bit(HCI_UP, &hdev->flags)) {
5260 			if (hdev)
5261 				hci_dev_put(hdev);
5262 
5263 			result = L2CAP_MR_BAD_ID;
5264 			goto send_move_response;
5265 		}
5266 		hci_dev_put(hdev);
5267 	}
5268 
5269 	/* Detect a move collision.  Only send a collision response
5270 	 * if this side has "lost", otherwise proceed with the move.
5271 	 * The winner has the larger bd_addr.
5272 	 */
5273 	if ((__chan_is_moving(chan) ||
5274 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5275 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5276 		result = L2CAP_MR_COLLISION;
5277 		goto send_move_response;
5278 	}
5279 
5280 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5281 	l2cap_move_setup(chan);
5282 	chan->move_id = req->dest_amp_id;
5283 
5284 	if (req->dest_amp_id == AMP_ID_BREDR) {
5285 		/* Moving to BR/EDR */
5286 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5287 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5288 			result = L2CAP_MR_PEND;
5289 		} else {
5290 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5291 			result = L2CAP_MR_SUCCESS;
5292 		}
5293 	} else {
5294 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5295 		/* Placeholder - uncomment when amp functions are available */
5296 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5297 		result = L2CAP_MR_PEND;
5298 	}
5299 
5300 send_move_response:
5301 	l2cap_send_move_chan_rsp(chan, result);
5302 
5303 	l2cap_chan_unlock(chan);
5304 
5305 	return 0;
5306 }
5307 
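/* Continue an in-progress channel move after a successful or pending Move
 * Channel Response, advancing move_state and sending the Move Channel
 * Confirm once the logical link is ready.
 */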
5308 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5309 {
5310 	struct l2cap_chan *chan;
5311 	struct hci_chan *hchan = NULL;
5312 
5313 	chan = l2cap_get_chan_by_scid(conn, icid);
5314 	if (!chan) {
5315 		l2cap_send_move_chan_cfm_icid(conn, icid);
5316 		return;
5317 	}
5318 
5319 	__clear_chan_timer(chan);
5320 	if (result == L2CAP_MR_PEND)
5321 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5322 
5323 	switch (chan->move_state) {
5324 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5325 		/* Move confirm will be sent when logical link
5326 		 * is complete.
5327 		 */
5328 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5329 		break;
5330 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5331 		if (result == L2CAP_MR_PEND) {
5332 			break;
5333 		} else if (test_bit(CONN_LOCAL_BUSY,
5334 				    &chan->conn_state)) {
5335 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5336 		} else {
5337 			/* Logical link is up or moving to BR/EDR,
5338 			 * proceed with move
5339 			 */
5340 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5341 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5342 		}
5343 		break;
5344 	case L2CAP_MOVE_WAIT_RSP:
5345 		/* Moving to AMP */
5346 		if (result == L2CAP_MR_SUCCESS) {
5347 			/* Remote is ready, send confirm immediately
5348 			 * after logical link is ready
5349 			 */
5350 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5351 		} else {
5352 			/* Both logical link and move success
5353 			 * are required to confirm
5354 			 */
5355 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5356 		}
5357 
5358 		/* Placeholder - get hci_chan for logical link */
5359 		if (!hchan) {
5360 			/* Logical link not available */
5361 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5362 			break;
5363 		}
5364 
5365 		/* If the logical link is not yet connected, do not
5366 		 * send confirmation.
5367 		 */
5368 		if (hchan->state != BT_CONNECTED)
5369 			break;
5370 
5371 		/* Logical link is already ready to go */
5372 
5373 		chan->hs_hcon = hchan->conn;
5374 		chan->hs_hcon->l2cap_data = chan->conn;
5375 
5376 		if (result == L2CAP_MR_SUCCESS) {
5377 			/* Can confirm now */
5378 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5379 		} else {
5380 			/* Now only need move success
5381 			 * to confirm
5382 			 */
5383 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5384 		}
5385 
5386 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5387 		break;
5388 	default:
5389 		/* Any other amp move state means the move failed. */
5390 		chan->move_id = chan->local_amp_id;
5391 		l2cap_move_done(chan);
5392 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5393 	}
5394 
5395 	l2cap_chan_unlock(chan);
5396 }
5397 
5398 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5399 			    u16 result)
5400 {
5401 	struct l2cap_chan *chan;
5402 
5403 	chan = l2cap_get_chan_by_ident(conn, ident);
5404 	if (!chan) {
5405 		/* Could not locate channel, icid is best guess */
5406 		l2cap_send_move_chan_cfm_icid(conn, icid);
5407 		return;
5408 	}
5409 
5410 	__clear_chan_timer(chan);
5411 
5412 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5413 		if (result == L2CAP_MR_COLLISION) {
5414 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5415 		} else {
5416 			/* Cleanup - cancel move */
5417 			chan->move_id = chan->local_amp_id;
5418 			l2cap_move_done(chan);
5419 		}
5420 	}
5421 
5422 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5423 
5424 	l2cap_chan_unlock(chan);
5425 }
5426 
5427 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5428 				  struct l2cap_cmd_hdr *cmd,
5429 				  u16 cmd_len, void *data)
5430 {
5431 	struct l2cap_move_chan_rsp *rsp = data;
5432 	u16 icid, result;
5433 
5434 	if (cmd_len != sizeof(*rsp))
5435 		return -EPROTO;
5436 
5437 	icid = le16_to_cpu(rsp->icid);
5438 	result = le16_to_cpu(rsp->result);
5439 
5440 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5441 
5442 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5443 		l2cap_move_continue(conn, icid, result);
5444 	else
5445 		l2cap_move_fail(conn, cmd->ident, icid, result);
5446 
5447 	return 0;
5448 }
5449 
5450 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5451 				      struct l2cap_cmd_hdr *cmd,
5452 				      u16 cmd_len, void *data)
5453 {
5454 	struct l2cap_move_chan_cfm *cfm = data;
5455 	struct l2cap_chan *chan;
5456 	u16 icid, result;
5457 
5458 	if (cmd_len != sizeof(*cfm))
5459 		return -EPROTO;
5460 
5461 	icid = le16_to_cpu(cfm->icid);
5462 	result = le16_to_cpu(cfm->result);
5463 
5464 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5465 
5466 	chan = l2cap_get_chan_by_dcid(conn, icid);
5467 	if (!chan) {
5468 		/* Spec requires a response even if the icid was not found */
5469 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5470 		return 0;
5471 	}
5472 
5473 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5474 		if (result == L2CAP_MC_CONFIRMED) {
5475 			chan->local_amp_id = chan->move_id;
5476 			if (chan->local_amp_id == AMP_ID_BREDR)
5477 				__release_logical_link(chan);
5478 		} else {
5479 			chan->move_id = chan->local_amp_id;
5480 		}
5481 
5482 		l2cap_move_done(chan);
5483 	}
5484 
5485 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5486 
5487 	l2cap_chan_unlock(chan);
5488 
5489 	return 0;
5490 }
5491 
5492 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5493 						 struct l2cap_cmd_hdr *cmd,
5494 						 u16 cmd_len, void *data)
5495 {
5496 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5497 	struct l2cap_chan *chan;
5498 	u16 icid;
5499 
5500 	if (cmd_len != sizeof(*rsp))
5501 		return -EPROTO;
5502 
5503 	icid = le16_to_cpu(rsp->icid);
5504 
5505 	BT_DBG("icid 0x%4.4x", icid);
5506 
5507 	chan = l2cap_get_chan_by_scid(conn, icid);
5508 	if (!chan)
5509 		return 0;
5510 
5511 	__clear_chan_timer(chan);
5512 
5513 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5514 		chan->local_amp_id = chan->move_id;
5515 
5516 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5517 			__release_logical_link(chan);
5518 
5519 		l2cap_move_done(chan);
5520 	}
5521 
5522 	l2cap_chan_unlock(chan);
5523 
5524 	return 0;
5525 }
5526 
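/* LE Connection Parameter Update Request.  Only the LE central may act
 * on this request, since it is the peripheral's way of asking the
 * central to change the connection parameters.  Accepted values are
 * applied via hci_le_conn_update() and reported to the management
 * interface.
 */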
5527 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5528 					      struct l2cap_cmd_hdr *cmd,
5529 					      u16 cmd_len, u8 *data)
5530 {
5531 	struct hci_conn *hcon = conn->hcon;
5532 	struct l2cap_conn_param_update_req *req;
5533 	struct l2cap_conn_param_update_rsp rsp;
5534 	u16 min, max, latency, to_multiplier;
5535 	int err;
5536 
5537 	if (hcon->role != HCI_ROLE_MASTER)
5538 		return -EINVAL;
5539 
5540 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5541 		return -EPROTO;
5542 
5543 	req = (struct l2cap_conn_param_update_req *) data;
5544 	min		= __le16_to_cpu(req->min);
5545 	max		= __le16_to_cpu(req->max);
5546 	latency		= __le16_to_cpu(req->latency);
5547 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5548 
5549 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5550 	       min, max, latency, to_multiplier);
5551 
5552 	memset(&rsp, 0, sizeof(rsp));
5553 
5554 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5555 	if (err)
5556 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5557 	else
5558 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5559 
5560 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5561 		       sizeof(rsp), &rsp);
5562 
5563 	if (!err) {
5564 		u8 store_hint;
5565 
5566 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5567 						to_multiplier);
5568 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5569 				    store_hint, min, max, latency,
5570 				    to_multiplier);
5571 
5572 	}
5573 
5574 	return 0;
5575 }
5576 
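/* Response to an LE Credit Based Connection Request we sent.  On
 * success the remote CID, MTU, MPS and initial TX credits are stored
 * and the channel becomes ready.  Authentication/encryption failures
 * bump the security level and let SMP trigger a new connect request;
 * any other result tears the channel down.
 */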
5577 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5578 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5579 				u8 *data)
5580 {
5581 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5582 	struct hci_conn *hcon = conn->hcon;
5583 	u16 dcid, mtu, mps, credits, result;
5584 	struct l2cap_chan *chan;
5585 	int err, sec_level;
5586 
5587 	if (cmd_len < sizeof(*rsp))
5588 		return -EPROTO;
5589 
5590 	dcid    = __le16_to_cpu(rsp->dcid);
5591 	mtu     = __le16_to_cpu(rsp->mtu);
5592 	mps     = __le16_to_cpu(rsp->mps);
5593 	credits = __le16_to_cpu(rsp->credits);
5594 	result  = __le16_to_cpu(rsp->result);
5595 
5596 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5597 					   dcid < L2CAP_CID_DYN_START ||
5598 					   dcid > L2CAP_CID_LE_DYN_END))
5599 		return -EPROTO;
5600 
5601 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5602 	       dcid, mtu, mps, credits, result);
5603 
5604 	mutex_lock(&conn->chan_lock);
5605 
5606 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5607 	if (!chan) {
5608 		err = -EBADSLT;
5609 		goto unlock;
5610 	}
5611 
5612 	err = 0;
5613 
5614 	l2cap_chan_lock(chan);
5615 
5616 	switch (result) {
5617 	case L2CAP_CR_LE_SUCCESS:
5618 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5619 			err = -EBADSLT;
5620 			break;
5621 		}
5622 
5623 		chan->ident = 0;
5624 		chan->dcid = dcid;
5625 		chan->omtu = mtu;
5626 		chan->remote_mps = mps;
5627 		chan->tx_credits = credits;
5628 		l2cap_chan_ready(chan);
5629 		break;
5630 
5631 	case L2CAP_CR_LE_AUTHENTICATION:
5632 	case L2CAP_CR_LE_ENCRYPTION:
5633 		/* If we already have MITM protection we can't do
5634 		 * anything.
5635 		 */
5636 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5637 			l2cap_chan_del(chan, ECONNREFUSED);
5638 			break;
5639 		}
5640 
5641 		sec_level = hcon->sec_level + 1;
5642 		if (chan->sec_level < sec_level)
5643 			chan->sec_level = sec_level;
5644 
5645 		/* We'll need to send a new Connect Request */
5646 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5647 
5648 		smp_conn_security(hcon, chan->sec_level);
5649 		break;
5650 
5651 	default:
5652 		l2cap_chan_del(chan, ECONNREFUSED);
5653 		break;
5654 	}
5655 
5656 	l2cap_chan_unlock(chan);
5657 
5658 unlock:
5659 	mutex_unlock(&conn->chan_lock);
5660 
5661 	return err;
5662 }
5663 
5664 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5665 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5666 				      u8 *data)
5667 {
5668 	int err = 0;
5669 
5670 	switch (cmd->code) {
5671 	case L2CAP_COMMAND_REJ:
5672 		l2cap_command_rej(conn, cmd, cmd_len, data);
5673 		break;
5674 
5675 	case L2CAP_CONN_REQ:
5676 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5677 		break;
5678 
5679 	case L2CAP_CONN_RSP:
5680 	case L2CAP_CREATE_CHAN_RSP:
5681 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5682 		break;
5683 
5684 	case L2CAP_CONF_REQ:
5685 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5686 		break;
5687 
5688 	case L2CAP_CONF_RSP:
5689 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5690 		break;
5691 
5692 	case L2CAP_DISCONN_REQ:
5693 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5694 		break;
5695 
5696 	case L2CAP_DISCONN_RSP:
5697 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5698 		break;
5699 
5700 	case L2CAP_ECHO_REQ:
5701 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5702 		break;
5703 
5704 	case L2CAP_ECHO_RSP:
5705 		break;
5706 
5707 	case L2CAP_INFO_REQ:
5708 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5709 		break;
5710 
5711 	case L2CAP_INFO_RSP:
5712 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5713 		break;
5714 
5715 	case L2CAP_CREATE_CHAN_REQ:
5716 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	case L2CAP_MOVE_CHAN_REQ:
5720 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5721 		break;
5722 
5723 	case L2CAP_MOVE_CHAN_RSP:
5724 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_MOVE_CHAN_CFM:
5728 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	case L2CAP_MOVE_CHAN_CFM_RSP:
5732 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5733 		break;
5734 
5735 	default:
5736 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5737 		err = -EINVAL;
5738 		break;
5739 	}
5740 
5741 	return err;
5742 }
5743 
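/* Incoming LE Credit Based Connection Request.  Validates the SPSM and
 * source CID, checks that a listening channel and sufficient security
 * exist, then either answers with our CID, MTU, MPS and initial RX
 * credits or defers the decision to the socket layer when
 * FLAG_DEFER_SETUP is set.
 */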
5744 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5745 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5746 				u8 *data)
5747 {
5748 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5749 	struct l2cap_le_conn_rsp rsp;
5750 	struct l2cap_chan *chan, *pchan;
5751 	u16 dcid, scid, credits, mtu, mps;
5752 	__le16 psm;
5753 	u8 result;
5754 
5755 	if (cmd_len != sizeof(*req))
5756 		return -EPROTO;
5757 
5758 	scid = __le16_to_cpu(req->scid);
5759 	mtu  = __le16_to_cpu(req->mtu);
5760 	mps  = __le16_to_cpu(req->mps);
5761 	psm  = req->psm;
5762 	dcid = 0;
5763 	credits = 0;
5764 
5765 	if (mtu < 23 || mps < 23)
5766 		return -EPROTO;
5767 
5768 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5769 	       scid, mtu, mps);
5770 
5771 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5772 	 * page 1059:
5773 	 *
5774 	 * Valid range: 0x0001-0x00ff
5775 	 *
5776 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5777 	 */
5778 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5779 		result = L2CAP_CR_LE_BAD_PSM;
5780 		chan = NULL;
5781 		goto response;
5782 	}
5783 
5784 	/* Check if we have socket listening on psm */
5785 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5786 					 &conn->hcon->dst, LE_LINK);
5787 	if (!pchan) {
5788 		result = L2CAP_CR_LE_BAD_PSM;
5789 		chan = NULL;
5790 		goto response;
5791 	}
5792 
5793 	mutex_lock(&conn->chan_lock);
5794 	l2cap_chan_lock(pchan);
5795 
5796 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5797 				     SMP_ALLOW_STK)) {
5798 		result = L2CAP_CR_LE_AUTHENTICATION;
5799 		chan = NULL;
5800 		goto response_unlock;
5801 	}
5802 
5803 	/* Check for valid dynamic CID range */
5804 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5805 		result = L2CAP_CR_LE_INVALID_SCID;
5806 		chan = NULL;
5807 		goto response_unlock;
5808 	}
5809 
5810 	/* Check if we already have channel with that dcid */
5811 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5812 		result = L2CAP_CR_LE_SCID_IN_USE;
5813 		chan = NULL;
5814 		goto response_unlock;
5815 	}
5816 
5817 	chan = pchan->ops->new_connection(pchan);
5818 	if (!chan) {
5819 		result = L2CAP_CR_LE_NO_MEM;
5820 		goto response_unlock;
5821 	}
5822 
5823 	bacpy(&chan->src, &conn->hcon->src);
5824 	bacpy(&chan->dst, &conn->hcon->dst);
5825 	chan->src_type = bdaddr_src_type(conn->hcon);
5826 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5827 	chan->psm  = psm;
5828 	chan->dcid = scid;
5829 	chan->omtu = mtu;
5830 	chan->remote_mps = mps;
5831 
5832 	__l2cap_chan_add(conn, chan);
5833 
5834 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5835 
5836 	dcid = chan->scid;
5837 	credits = chan->rx_credits;
5838 
5839 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5840 
5841 	chan->ident = cmd->ident;
5842 
5843 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5844 		l2cap_state_change(chan, BT_CONNECT2);
5845 		/* The following result value is actually not defined
5846 		 * for LE CoC but we use it to let the function know
5847 		 * that it should bail out after doing its cleanup
5848 		 * instead of sending a response.
5849 		 */
5850 		result = L2CAP_CR_PEND;
5851 		chan->ops->defer(chan);
5852 	} else {
5853 		l2cap_chan_ready(chan);
5854 		result = L2CAP_CR_LE_SUCCESS;
5855 	}
5856 
5857 response_unlock:
5858 	l2cap_chan_unlock(pchan);
5859 	mutex_unlock(&conn->chan_lock);
5860 	l2cap_chan_put(pchan);
5861 
5862 	if (result == L2CAP_CR_PEND)
5863 		return 0;
5864 
5865 response:
5866 	if (chan) {
5867 		rsp.mtu = cpu_to_le16(chan->imtu);
5868 		rsp.mps = cpu_to_le16(chan->mps);
5869 	} else {
5870 		rsp.mtu = 0;
5871 		rsp.mps = 0;
5872 	}
5873 
5874 	rsp.dcid    = cpu_to_le16(dcid);
5875 	rsp.credits = cpu_to_le16(credits);
5876 	rsp.result  = cpu_to_le16(result);
5877 
5878 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5879 
5880 	return 0;
5881 }
5882 
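/* LE Flow Control Credit packet: the peer grants us additional TX
 * credits.  A grant that would push tx_credits past
 * LE_FLOWCTL_MAX_CREDITS is treated as a protocol violation and the
 * channel is disconnected.
 */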
5883 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5884 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5885 				   u8 *data)
5886 {
5887 	struct l2cap_le_credits *pkt;
5888 	struct l2cap_chan *chan;
5889 	u16 cid, credits, max_credits;
5890 
5891 	if (cmd_len != sizeof(*pkt))
5892 		return -EPROTO;
5893 
5894 	pkt = (struct l2cap_le_credits *) data;
5895 	cid	= __le16_to_cpu(pkt->cid);
5896 	credits	= __le16_to_cpu(pkt->credits);
5897 
5898 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5899 
5900 	chan = l2cap_get_chan_by_dcid(conn, cid);
5901 	if (!chan)
5902 		return -EBADSLT;
5903 
5904 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5905 	if (credits > max_credits) {
5906 		BT_ERR("LE credits overflow");
5907 		l2cap_send_disconn_req(chan, ECONNRESET);
5908 		l2cap_chan_unlock(chan);
5909 
5910 		/* Return 0 so that we don't trigger an unnecessary
5911 		 * command reject packet.
5912 		 */
5913 		return 0;
5914 	}
5915 
5916 	chan->tx_credits += credits;
5917 
5918 	/* Resume sending */
5919 	l2cap_le_flowctl_send(chan);
5920 
5921 	if (chan->tx_credits)
5922 		chan->ops->resume(chan);
5923 
5924 	l2cap_chan_unlock(chan);
5925 
5926 	return 0;
5927 }
5928 
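/* Enhanced Credit Based Connection Request: a single request may ask
 * for several channels at once.  Each SCID is validated individually;
 * the response carries one DCID per requested channel (0x0000 for the
 * ones that were refused) plus a shared result code.
 */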
5929 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5930 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5931 				       u8 *data)
5932 {
5933 	struct l2cap_ecred_conn_req *req = (void *) data;
5934 	struct {
5935 		struct l2cap_ecred_conn_rsp rsp;
5936 		__le16 dcid[5];
5937 	} __packed pdu;
5938 	struct l2cap_chan *chan, *pchan;
5939 	u16 mtu, mps;
5940 	__le16 psm;
5941 	u8 result, len = 0;
5942 	int i, num_scid;
5943 	bool defer = false;
5944 
5945 	if (!enable_ecred)
5946 		return -EINVAL;
5947 
5948 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5949 		result = L2CAP_CR_LE_INVALID_PARAMS;
5950 		goto response;
5951 	}
5952 
5953 	mtu  = __le16_to_cpu(req->mtu);
5954 	mps  = __le16_to_cpu(req->mps);
5955 
5956 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5957 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5958 		goto response;
5959 	}
5960 
5961 	psm  = req->psm;
5962 
5963 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5964 	 * page 1059:
5965 	 *
5966 	 * Valid range: 0x0001-0x00ff
5967 	 *
5968 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5969 	 */
5970 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5971 		result = L2CAP_CR_LE_BAD_PSM;
5972 		goto response;
5973 	}
5974 
5975 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5976 
5977 	memset(&pdu, 0, sizeof(pdu));
5978 
5979 	/* Check if we have socket listening on psm */
5980 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5981 					 &conn->hcon->dst, LE_LINK);
5982 	if (!pchan) {
5983 		result = L2CAP_CR_LE_BAD_PSM;
5984 		goto response;
5985 	}
5986 
5987 	mutex_lock(&conn->chan_lock);
5988 	l2cap_chan_lock(pchan);
5989 
5990 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5991 				     SMP_ALLOW_STK)) {
5992 		result = L2CAP_CR_LE_AUTHENTICATION;
5993 		goto unlock;
5994 	}
5995 
5996 	result = L2CAP_CR_LE_SUCCESS;
5997 	cmd_len -= sizeof(*req);
5998 	num_scid = cmd_len / sizeof(u16);
5999 
	/* The response PDU declared above only has room for five DCIDs;
	 * reject requests carrying more SCIDs so the loop below cannot
	 * write past pdu.dcid[].
	 */
	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto unlock;
	}

6000 	for (i = 0; i < num_scid; i++) {
6001 		u16 scid = __le16_to_cpu(req->scid[i]);
6002 
6003 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6004 
6005 		pdu.dcid[i] = 0x0000;
6006 		len += sizeof(*pdu.dcid);
6007 
6008 		/* Check for valid dynamic CID range */
6009 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6010 			result = L2CAP_CR_LE_INVALID_SCID;
6011 			continue;
6012 		}
6013 
6014 		/* Check if we already have channel with that dcid */
6015 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6016 			result = L2CAP_CR_LE_SCID_IN_USE;
6017 			continue;
6018 		}
6019 
6020 		chan = pchan->ops->new_connection(pchan);
6021 		if (!chan) {
6022 			result = L2CAP_CR_LE_NO_MEM;
6023 			continue;
6024 		}
6025 
6026 		bacpy(&chan->src, &conn->hcon->src);
6027 		bacpy(&chan->dst, &conn->hcon->dst);
6028 		chan->src_type = bdaddr_src_type(conn->hcon);
6029 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6030 		chan->psm  = psm;
6031 		chan->dcid = scid;
6032 		chan->omtu = mtu;
6033 		chan->remote_mps = mps;
6034 
6035 		__l2cap_chan_add(conn, chan);
6036 
6037 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6038 
6039 		/* Init response */
6040 		if (!pdu.rsp.credits) {
6041 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6042 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6043 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6044 		}
6045 
6046 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6047 
6048 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6049 
6050 		chan->ident = cmd->ident;
6051 
6052 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6053 			l2cap_state_change(chan, BT_CONNECT2);
6054 			defer = true;
6055 			chan->ops->defer(chan);
6056 		} else {
6057 			l2cap_chan_ready(chan);
6058 		}
6059 	}
6060 
6061 unlock:
6062 	l2cap_chan_unlock(pchan);
6063 	mutex_unlock(&conn->chan_lock);
6064 	l2cap_chan_put(pchan);
6065 
6066 response:
6067 	pdu.rsp.result = cpu_to_le16(result);
6068 
6069 	if (defer)
6070 		return 0;
6071 
6072 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6073 		       sizeof(pdu.rsp) + len, &pdu);
6074 
6075 	return 0;
6076 }
6077 
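/* Response to an Enhanced Credit Based Connection Request we sent.
 * Walk the channels still pending on this ident and pair each one with
 * the next DCID from the response, handling security-related failures
 * the same way as the LE Credit Based case above.
 */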
6078 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6079 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6080 				       u8 *data)
6081 {
6082 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6083 	struct hci_conn *hcon = conn->hcon;
6084 	u16 mtu, mps, credits, result;
6085 	struct l2cap_chan *chan, *tmp;
6086 	int err = 0, sec_level;
6087 	int i = 0;
6088 
6089 	if (cmd_len < sizeof(*rsp))
6090 		return -EPROTO;
6091 
6092 	mtu     = __le16_to_cpu(rsp->mtu);
6093 	mps     = __le16_to_cpu(rsp->mps);
6094 	credits = __le16_to_cpu(rsp->credits);
6095 	result  = __le16_to_cpu(rsp->result);
6096 
6097 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6098 	       result);
6099 
6100 	mutex_lock(&conn->chan_lock);
6101 
6102 	cmd_len -= sizeof(*rsp);
6103 
6104 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6105 		u16 dcid;
6106 
6107 		if (chan->ident != cmd->ident ||
6108 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6109 		    chan->state == BT_CONNECTED)
6110 			continue;
6111 
6112 		l2cap_chan_lock(chan);
6113 
6114 		/* Check that there is a dcid for each pending channel */
6115 		if (cmd_len < sizeof(dcid)) {
6116 			l2cap_chan_del(chan, ECONNREFUSED);
6117 			l2cap_chan_unlock(chan);
6118 			continue;
6119 		}
6120 
6121 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6122 		cmd_len -= sizeof(u16);
6123 
6124 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6125 
6126 		/* Check if dcid is already in use */
6127 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6128 			/* If a device receives a
6129 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6130 			 * already-assigned Destination CID, then both the
6131 			 * original channel and the new channel shall be
6132 			 * immediately discarded and not used.
6133 			 */
6134 			l2cap_chan_del(chan, ECONNREFUSED);
6135 			l2cap_chan_unlock(chan);
6136 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6137 			l2cap_chan_lock(chan);
6138 			l2cap_chan_del(chan, ECONNRESET);
6139 			l2cap_chan_unlock(chan);
6140 			continue;
6141 		}
6142 
6143 		switch (result) {
6144 		case L2CAP_CR_LE_AUTHENTICATION:
6145 		case L2CAP_CR_LE_ENCRYPTION:
6146 			/* If we already have MITM protection we can't do
6147 			 * anything.
6148 			 */
6149 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6150 				l2cap_chan_del(chan, ECONNREFUSED);
6151 				break;
6152 			}
6153 
6154 			sec_level = hcon->sec_level + 1;
6155 			if (chan->sec_level < sec_level)
6156 				chan->sec_level = sec_level;
6157 
6158 			/* We'll need to send a new Connect Request */
6159 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6160 
6161 			smp_conn_security(hcon, chan->sec_level);
6162 			break;
6163 
6164 		case L2CAP_CR_LE_BAD_PSM:
6165 			l2cap_chan_del(chan, ECONNREFUSED);
6166 			break;
6167 
6168 		default:
6169 			/* If dcid was not set it means the channel was refused */
6170 			if (!dcid) {
6171 				l2cap_chan_del(chan, ECONNREFUSED);
6172 				break;
6173 			}
6174 
6175 			chan->ident = 0;
6176 			chan->dcid = dcid;
6177 			chan->omtu = mtu;
6178 			chan->remote_mps = mps;
6179 			chan->tx_credits = credits;
6180 			l2cap_chan_ready(chan);
6181 			break;
6182 		}
6183 
6184 		l2cap_chan_unlock(chan);
6185 	}
6186 
6187 	mutex_unlock(&conn->chan_lock);
6188 
6189 	return err;
6190 }
6191 
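/* Enhanced Credit Based Reconfigure Request: the peer wants to change
 * MTU and/or MPS on a set of established channels.  Lowering the MTU
 * is not allowed and is reported as an invalid MTU.
 */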
6192 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6193 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6194 					 u8 *data)
6195 {
6196 	struct l2cap_ecred_reconf_req *req = (void *) data;
6197 	struct l2cap_ecred_reconf_rsp rsp;
6198 	u16 mtu, mps, result;
6199 	struct l2cap_chan *chan;
6200 	int i, num_scid;
6201 
6202 	if (!enable_ecred)
6203 		return -EINVAL;
6204 
6205 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6206 		result = L2CAP_CR_LE_INVALID_PARAMS;
6207 		goto respond;
6208 	}
6209 
6210 	mtu = __le16_to_cpu(req->mtu);
6211 	mps = __le16_to_cpu(req->mps);
6212 
6213 	BT_DBG("mtu %u mps %u", mtu, mps);
6214 
6215 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6216 		result = L2CAP_RECONF_INVALID_MTU;
6217 		goto respond;
6218 	}
6219 
6220 	if (mps < L2CAP_ECRED_MIN_MPS) {
6221 		result = L2CAP_RECONF_INVALID_MPS;
6222 		goto respond;
6223 	}
6224 
6225 	cmd_len -= sizeof(*req);
6226 	num_scid = cmd_len / sizeof(u16);
6227 	result = L2CAP_RECONF_SUCCESS;
6228 
6229 	for (i = 0; i < num_scid; i++) {
6230 		u16 scid;
6231 
6232 		scid = __le16_to_cpu(req->scid[i]);
6233 		if (!scid)
6234 			return -EPROTO;
6235 
6236 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6237 		if (!chan)
6238 			continue;
6239 
6240 		/* If the MTU value is decreased for any of the included
6241 		 * channels, then the receiver shall disconnect all
6242 		 * included channels.
6243 		 */
6244 		if (chan->omtu > mtu) {
6245 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6246 			       chan->omtu, mtu);
6247 			result = L2CAP_RECONF_INVALID_MTU;
6248 		}
6249 
6250 		chan->omtu = mtu;
6251 		chan->remote_mps = mps;
6252 	}
6253 
6254 respond:
6255 	rsp.result = cpu_to_le16(result);
6256 
6257 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6258 		       &rsp);
6259 
6260 	return 0;
6261 }
6262 
6263 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6264 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6265 					 u8 *data)
6266 {
6267 	struct l2cap_chan *chan, *tmp;
6268 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6269 	u16 result;
6270 
6271 	if (cmd_len < sizeof(*rsp))
6272 		return -EPROTO;
6273 
6274 	result = __le16_to_cpu(rsp->result);
6275 
6276 	BT_DBG("result 0x%4.4x", result);
6277 
6278 	if (!result)
6279 		return 0;
6280 
6281 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6282 		if (chan->ident != cmd->ident)
6283 			continue;
6284 
6285 		l2cap_chan_del(chan, ECONNRESET);
6286 	}
6287 
6288 	return 0;
6289 }
6290 
6291 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6292 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6293 				       u8 *data)
6294 {
6295 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6296 	struct l2cap_chan *chan;
6297 
6298 	if (cmd_len < sizeof(*rej))
6299 		return -EPROTO;
6300 
6301 	mutex_lock(&conn->chan_lock);
6302 
6303 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6304 	if (!chan)
6305 		goto done;
6306 
6307 	l2cap_chan_lock(chan);
6308 	l2cap_chan_del(chan, ECONNREFUSED);
6309 	l2cap_chan_unlock(chan);
6310 
6311 done:
6312 	mutex_unlock(&conn->chan_lock);
6313 	return 0;
6314 }
6315 
6316 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6317 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6318 				   u8 *data)
6319 {
6320 	int err = 0;
6321 
6322 	switch (cmd->code) {
6323 	case L2CAP_COMMAND_REJ:
6324 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6325 		break;
6326 
6327 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6328 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6329 		break;
6330 
6331 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6332 		break;
6333 
6334 	case L2CAP_LE_CONN_RSP:
6335 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6336 		break;
6337 
6338 	case L2CAP_LE_CONN_REQ:
6339 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6340 		break;
6341 
6342 	case L2CAP_LE_CREDITS:
6343 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6344 		break;
6345 
6346 	case L2CAP_ECRED_CONN_REQ:
6347 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6348 		break;
6349 
6350 	case L2CAP_ECRED_CONN_RSP:
6351 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6352 		break;
6353 
6354 	case L2CAP_ECRED_RECONF_REQ:
6355 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6356 		break;
6357 
6358 	case L2CAP_ECRED_RECONF_RSP:
6359 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6360 		break;
6361 
6362 	case L2CAP_DISCONN_REQ:
6363 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6364 		break;
6365 
6366 	case L2CAP_DISCONN_RSP:
6367 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6368 		break;
6369 
6370 	default:
6371 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6372 		err = -EINVAL;
6373 		break;
6374 	}
6375 
6376 	return err;
6377 }
6378 
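/* LE signaling channel input.  Unlike BR/EDR, an LE signaling frame
 * carries exactly one command, so the length in the command header
 * must match the remaining payload exactly.
 */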
6379 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6380 					struct sk_buff *skb)
6381 {
6382 	struct hci_conn *hcon = conn->hcon;
6383 	struct l2cap_cmd_hdr *cmd;
6384 	u16 len;
6385 	int err;
6386 
6387 	if (hcon->type != LE_LINK)
6388 		goto drop;
6389 
6390 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6391 		goto drop;
6392 
6393 	cmd = (void *) skb->data;
6394 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6395 
6396 	len = le16_to_cpu(cmd->len);
6397 
6398 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6399 
6400 	if (len != skb->len || !cmd->ident) {
6401 		BT_DBG("corrupted command");
6402 		goto drop;
6403 	}
6404 
6405 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6406 	if (err) {
6407 		struct l2cap_cmd_rej_unk rej;
6408 
6409 		BT_ERR("LE signaling command 0x%2.2x failed: %d", cmd->code, err);
6410 
6411 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6412 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6413 			       sizeof(rej), &rej);
6414 	}
6415 
6416 drop:
6417 	kfree_skb(skb);
6418 }
6419 
6420 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6421 				     struct sk_buff *skb)
6422 {
6423 	struct hci_conn *hcon = conn->hcon;
6424 	struct l2cap_cmd_hdr *cmd;
6425 	int err;
6426 
6427 	l2cap_raw_recv(conn, skb);
6428 
6429 	if (hcon->type != ACL_LINK)
6430 		goto drop;
6431 
6432 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6433 		u16 len;
6434 
6435 		cmd = (void *) skb->data;
6436 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6437 
6438 		len = le16_to_cpu(cmd->len);
6439 
6440 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6441 		       cmd->ident);
6442 
6443 		if (len > skb->len || !cmd->ident) {
6444 			BT_DBG("corrupted command");
6445 			break;
6446 		}
6447 
6448 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6449 		if (err) {
6450 			struct l2cap_cmd_rej_unk rej;
6451 
6452 			BT_ERR("Signaling command 0x%2.2x failed: %d", cmd->code, err);
6453 
6454 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6455 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6456 				       sizeof(rej), &rej);
6457 		}
6458 
6459 		skb_pull(skb, len);
6460 	}
6461 
6462 drop:
6463 	kfree_skb(skb);
6464 }
6465 
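/* When the CRC16 FCS option is enabled, trim the 2-byte FCS from the
 * tail of the frame and compare it with a CRC computed over the
 * (already pulled) L2CAP header plus the remaining payload.
 */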
6466 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6467 {
6468 	u16 our_fcs, rcv_fcs;
6469 	int hdr_size;
6470 
6471 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6472 		hdr_size = L2CAP_EXT_HDR_SIZE;
6473 	else
6474 		hdr_size = L2CAP_ENH_HDR_SIZE;
6475 
6476 	if (chan->fcs == L2CAP_FCS_CRC16) {
6477 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6478 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6479 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6480 
6481 		if (our_fcs != rcv_fcs)
6482 			return -EBADMSG;
6483 	}
6484 	return 0;
6485 }
6486 
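/* Answer a poll (P=1) from the peer: send an RNR if we are locally
 * busy, flush any pending I-frames, and if none of them carried the
 * F-bit, finish with an RR so the final bit is always returned.
 */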
6487 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6488 {
6489 	struct l2cap_ctrl control;
6490 
6491 	BT_DBG("chan %p", chan);
6492 
6493 	memset(&control, 0, sizeof(control));
6494 	control.sframe = 1;
6495 	control.final = 1;
6496 	control.reqseq = chan->buffer_seq;
6497 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6498 
6499 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6500 		control.super = L2CAP_SUPER_RNR;
6501 		l2cap_send_sframe(chan, &control);
6502 	}
6503 
6504 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6505 	    chan->unacked_frames > 0)
6506 		__set_retrans_timer(chan);
6507 
6508 	/* Send pending iframes */
6509 	l2cap_ertm_send(chan);
6510 
6511 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6512 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6513 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6514 		 * send it now.
6515 		 */
6516 		control.super = L2CAP_SUPER_RR;
6517 		l2cap_send_sframe(chan, &control);
6518 	}
6519 }
6520 
6521 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6522 			    struct sk_buff **last_frag)
6523 {
6524 	/* skb->len reflects data in skb as well as all fragments
6525 	 * skb->data_len reflects only data in fragments
6526 	 */
6527 	if (!skb_has_frag_list(skb))
6528 		skb_shinfo(skb)->frag_list = new_frag;
6529 
6530 	new_frag->next = NULL;
6531 
6532 	(*last_frag)->next = new_frag;
6533 	*last_frag = new_frag;
6534 
6535 	skb->len += new_frag->len;
6536 	skb->data_len += new_frag->len;
6537 	skb->truesize += new_frag->truesize;
6538 }
6539 
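/* Reassemble an SDU from its SAR fragments.  Unsegmented PDUs are
 * delivered directly; a start fragment records the expected SDU length
 * and buffers the skb, and continuation/end fragments are appended
 * until the full SDU has arrived and can be passed to ->recv().
 */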
6540 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6541 				struct l2cap_ctrl *control)
6542 {
6543 	int err = -EINVAL;
6544 
6545 	switch (control->sar) {
6546 	case L2CAP_SAR_UNSEGMENTED:
6547 		if (chan->sdu)
6548 			break;
6549 
6550 		err = chan->ops->recv(chan, skb);
6551 		break;
6552 
6553 	case L2CAP_SAR_START:
6554 		if (chan->sdu)
6555 			break;
6556 
6557 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6558 			break;
6559 
6560 		chan->sdu_len = get_unaligned_le16(skb->data);
6561 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6562 
6563 		if (chan->sdu_len > chan->imtu) {
6564 			err = -EMSGSIZE;
6565 			break;
6566 		}
6567 
6568 		if (skb->len >= chan->sdu_len)
6569 			break;
6570 
6571 		chan->sdu = skb;
6572 		chan->sdu_last_frag = skb;
6573 
6574 		skb = NULL;
6575 		err = 0;
6576 		break;
6577 
6578 	case L2CAP_SAR_CONTINUE:
6579 		if (!chan->sdu)
6580 			break;
6581 
6582 		append_skb_frag(chan->sdu, skb,
6583 				&chan->sdu_last_frag);
6584 		skb = NULL;
6585 
6586 		if (chan->sdu->len >= chan->sdu_len)
6587 			break;
6588 
6589 		err = 0;
6590 		break;
6591 
6592 	case L2CAP_SAR_END:
6593 		if (!chan->sdu)
6594 			break;
6595 
6596 		append_skb_frag(chan->sdu, skb,
6597 				&chan->sdu_last_frag);
6598 		skb = NULL;
6599 
6600 		if (chan->sdu->len != chan->sdu_len)
6601 			break;
6602 
6603 		err = chan->ops->recv(chan, chan->sdu);
6604 
6605 		if (!err) {
6606 			/* Reassembly complete */
6607 			chan->sdu = NULL;
6608 			chan->sdu_last_frag = NULL;
6609 			chan->sdu_len = 0;
6610 		}
6611 		break;
6612 	}
6613 
6614 	if (err) {
6615 		kfree_skb(skb);
6616 		kfree_skb(chan->sdu);
6617 		chan->sdu = NULL;
6618 		chan->sdu_last_frag = NULL;
6619 		chan->sdu_len = 0;
6620 	}
6621 
6622 	return err;
6623 }
6624 
6625 static int l2cap_resegment(struct l2cap_chan *chan)
6626 {
6627 	/* Placeholder */
6628 	return 0;
6629 }
6630 
6631 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6632 {
6633 	u8 event;
6634 
6635 	if (chan->mode != L2CAP_MODE_ERTM)
6636 		return;
6637 
6638 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6639 	l2cap_tx(chan, NULL, NULL, event);
6640 }
6641 
6642 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6643 {
6644 	int err = 0;
6645 	/* Pass sequential frames to l2cap_reassemble_sdu()
6646 	 * until a gap is encountered.
6647 	 */
6648 
6649 	BT_DBG("chan %p", chan);
6650 
6651 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6652 		struct sk_buff *skb;
6653 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6654 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6655 
6656 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6657 
6658 		if (!skb)
6659 			break;
6660 
6661 		skb_unlink(skb, &chan->srej_q);
6662 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6663 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6664 		if (err)
6665 			break;
6666 	}
6667 
6668 	if (skb_queue_empty(&chan->srej_q)) {
6669 		chan->rx_state = L2CAP_RX_STATE_RECV;
6670 		l2cap_send_ack(chan);
6671 	}
6672 
6673 	return err;
6674 }
6675 
6676 static void l2cap_handle_srej(struct l2cap_chan *chan,
6677 			      struct l2cap_ctrl *control)
6678 {
6679 	struct sk_buff *skb;
6680 
6681 	BT_DBG("chan %p, control %p", chan, control);
6682 
6683 	if (control->reqseq == chan->next_tx_seq) {
6684 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6685 		l2cap_send_disconn_req(chan, ECONNRESET);
6686 		return;
6687 	}
6688 
6689 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6690 
6691 	if (skb == NULL) {
6692 		BT_DBG("Seq %d not available for retransmission",
6693 		       control->reqseq);
6694 		return;
6695 	}
6696 
6697 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6698 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6699 		l2cap_send_disconn_req(chan, ECONNRESET);
6700 		return;
6701 	}
6702 
6703 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6704 
6705 	if (control->poll) {
6706 		l2cap_pass_to_tx(chan, control);
6707 
6708 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6709 		l2cap_retransmit(chan, control);
6710 		l2cap_ertm_send(chan);
6711 
6712 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6713 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6714 			chan->srej_save_reqseq = control->reqseq;
6715 		}
6716 	} else {
6717 		l2cap_pass_to_tx_fbit(chan, control);
6718 
6719 		if (control->final) {
6720 			if (chan->srej_save_reqseq != control->reqseq ||
6721 			    !test_and_clear_bit(CONN_SREJ_ACT,
6722 						&chan->conn_state))
6723 				l2cap_retransmit(chan, control);
6724 		} else {
6725 			l2cap_retransmit(chan, control);
6726 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6727 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6728 				chan->srej_save_reqseq = control->reqseq;
6729 			}
6730 		}
6731 	}
6732 }
6733 
6734 static void l2cap_handle_rej(struct l2cap_chan *chan,
6735 			     struct l2cap_ctrl *control)
6736 {
6737 	struct sk_buff *skb;
6738 
6739 	BT_DBG("chan %p, control %p", chan, control);
6740 
6741 	if (control->reqseq == chan->next_tx_seq) {
6742 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6743 		l2cap_send_disconn_req(chan, ECONNRESET);
6744 		return;
6745 	}
6746 
6747 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6748 
6749 	if (chan->max_tx && skb &&
6750 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6751 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6752 		l2cap_send_disconn_req(chan, ECONNRESET);
6753 		return;
6754 	}
6755 
6756 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6757 
6758 	l2cap_pass_to_tx(chan, control);
6759 
6760 	if (control->final) {
6761 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6762 			l2cap_retransmit_all(chan, control);
6763 	} else {
6764 		l2cap_retransmit_all(chan, control);
6765 		l2cap_ertm_send(chan);
6766 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6767 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6768 	}
6769 }
6770 
6771 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6772 {
6773 	BT_DBG("chan %p, txseq %d", chan, txseq);
6774 
6775 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6776 	       chan->expected_tx_seq);
6777 
6778 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6779 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6780 		    chan->tx_win) {
6781 			/* See notes below regarding "double poll" and
6782 			 * invalid packets.
6783 			 */
6784 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6785 				BT_DBG("Invalid/Ignore - after SREJ");
6786 				return L2CAP_TXSEQ_INVALID_IGNORE;
6787 			} else {
6788 				BT_DBG("Invalid - in window after SREJ sent");
6789 				return L2CAP_TXSEQ_INVALID;
6790 			}
6791 		}
6792 
6793 		if (chan->srej_list.head == txseq) {
6794 			BT_DBG("Expected SREJ");
6795 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6796 		}
6797 
6798 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6799 			BT_DBG("Duplicate SREJ - txseq already stored");
6800 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6801 		}
6802 
6803 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6804 			BT_DBG("Unexpected SREJ - not requested");
6805 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6806 		}
6807 	}
6808 
6809 	if (chan->expected_tx_seq == txseq) {
6810 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6811 		    chan->tx_win) {
6812 			BT_DBG("Invalid - txseq outside tx window");
6813 			return L2CAP_TXSEQ_INVALID;
6814 		} else {
6815 			BT_DBG("Expected");
6816 			return L2CAP_TXSEQ_EXPECTED;
6817 		}
6818 	}
6819 
6820 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6821 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6822 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6823 		return L2CAP_TXSEQ_DUPLICATE;
6824 	}
6825 
6826 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6827 		/* A source of invalid packets is a "double poll" condition,
6828 		 * where delays cause us to send multiple poll packets.  If
6829 		 * the remote stack receives and processes both polls,
6830 		 * sequence numbers can wrap around in such a way that a
6831 		 * resent frame has a sequence number that looks like new data
6832 		 * with a sequence gap.  This would trigger an erroneous SREJ
6833 		 * request.
6834 		 *
6835 		 * Fortunately, this is impossible with a tx window that's
6836 		 * less than half of the maximum sequence number, which allows
6837 		 * invalid frames to be safely ignored.
6838 		 *
6839 		 * With tx window sizes greater than half of the tx window
6840 		 * maximum, the frame is invalid and cannot be ignored.  This
6841 		 * causes a disconnect.
6842 		 */
6843 
6844 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6845 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6846 			return L2CAP_TXSEQ_INVALID_IGNORE;
6847 		} else {
6848 			BT_DBG("Invalid - txseq outside tx window");
6849 			return L2CAP_TXSEQ_INVALID;
6850 		}
6851 	} else {
6852 		BT_DBG("Unexpected - txseq indicates missing frames");
6853 		return L2CAP_TXSEQ_UNEXPECTED;
6854 	}
6855 }
6856 
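/* ERTM receive state machine, RECV state.  Expected I-frames are
 * reassembled and acknowledged, out-of-sequence frames are buffered in
 * srej_q while SREJ frames request the missing ones, and RR/RNR/REJ/
 * SREJ S-frames drive the transmit side.
 */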
6857 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6858 			       struct l2cap_ctrl *control,
6859 			       struct sk_buff *skb, u8 event)
6860 {
6861 	struct l2cap_ctrl local_control;
6862 	int err = 0;
6863 	bool skb_in_use = false;
6864 
6865 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6866 	       event);
6867 
6868 	switch (event) {
6869 	case L2CAP_EV_RECV_IFRAME:
6870 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6871 		case L2CAP_TXSEQ_EXPECTED:
6872 			l2cap_pass_to_tx(chan, control);
6873 
6874 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6875 				BT_DBG("Busy, discarding expected seq %d",
6876 				       control->txseq);
6877 				break;
6878 			}
6879 
6880 			chan->expected_tx_seq = __next_seq(chan,
6881 							   control->txseq);
6882 
6883 			chan->buffer_seq = chan->expected_tx_seq;
6884 			skb_in_use = true;
6885 
6886 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6887 			 * control, so make a copy in advance to use it after
6888 			 * l2cap_reassemble_sdu returns and to avoid the race
6889 			 * condition, for example:
6890 			 *
6891 			 * The current thread calls:
6892 			 *   l2cap_reassemble_sdu
6893 			 *     chan->ops->recv == l2cap_sock_recv_cb
6894 			 *       __sock_queue_rcv_skb
6895 			 * Another thread calls:
6896 			 *   bt_sock_recvmsg
6897 			 *     skb_recv_datagram
6898 			 *     skb_free_datagram
6899 			 * Then the current thread tries to access control, but
6900 			 * it was freed by skb_free_datagram.
6901 			 */
6902 			local_control = *control;
6903 			err = l2cap_reassemble_sdu(chan, skb, control);
6904 			if (err)
6905 				break;
6906 
6907 			if (local_control.final) {
6908 				if (!test_and_clear_bit(CONN_REJ_ACT,
6909 							&chan->conn_state)) {
6910 					local_control.final = 0;
6911 					l2cap_retransmit_all(chan, &local_control);
6912 					l2cap_ertm_send(chan);
6913 				}
6914 			}
6915 
6916 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6917 				l2cap_send_ack(chan);
6918 			break;
6919 		case L2CAP_TXSEQ_UNEXPECTED:
6920 			l2cap_pass_to_tx(chan, control);
6921 
6922 			/* Can't issue SREJ frames in the local busy state.
6923 			 * Drop this frame, it will be seen as missing
6924 			 * when local busy is exited.
6925 			 */
6926 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6927 				BT_DBG("Busy, discarding unexpected seq %d",
6928 				       control->txseq);
6929 				break;
6930 			}
6931 
6932 			/* There was a gap in the sequence, so an SREJ
6933 			 * must be sent for each missing frame.  The
6934 			 * current frame is stored for later use.
6935 			 */
6936 			skb_queue_tail(&chan->srej_q, skb);
6937 			skb_in_use = true;
6938 			BT_DBG("Queued %p (queue len %d)", skb,
6939 			       skb_queue_len(&chan->srej_q));
6940 
6941 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6942 			l2cap_seq_list_clear(&chan->srej_list);
6943 			l2cap_send_srej(chan, control->txseq);
6944 
6945 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6946 			break;
6947 		case L2CAP_TXSEQ_DUPLICATE:
6948 			l2cap_pass_to_tx(chan, control);
6949 			break;
6950 		case L2CAP_TXSEQ_INVALID_IGNORE:
6951 			break;
6952 		case L2CAP_TXSEQ_INVALID:
6953 		default:
6954 			l2cap_send_disconn_req(chan, ECONNRESET);
6955 			break;
6956 		}
6957 		break;
6958 	case L2CAP_EV_RECV_RR:
6959 		l2cap_pass_to_tx(chan, control);
6960 		if (control->final) {
6961 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6962 
6963 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6964 			    !__chan_is_moving(chan)) {
6965 				control->final = 0;
6966 				l2cap_retransmit_all(chan, control);
6967 			}
6968 
6969 			l2cap_ertm_send(chan);
6970 		} else if (control->poll) {
6971 			l2cap_send_i_or_rr_or_rnr(chan);
6972 		} else {
6973 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6974 					       &chan->conn_state) &&
6975 			    chan->unacked_frames)
6976 				__set_retrans_timer(chan);
6977 
6978 			l2cap_ertm_send(chan);
6979 		}
6980 		break;
6981 	case L2CAP_EV_RECV_RNR:
6982 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6983 		l2cap_pass_to_tx(chan, control);
6984 		if (control && control->poll) {
6985 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6986 			l2cap_send_rr_or_rnr(chan, 0);
6987 		}
6988 		__clear_retrans_timer(chan);
6989 		l2cap_seq_list_clear(&chan->retrans_list);
6990 		break;
6991 	case L2CAP_EV_RECV_REJ:
6992 		l2cap_handle_rej(chan, control);
6993 		break;
6994 	case L2CAP_EV_RECV_SREJ:
6995 		l2cap_handle_srej(chan, control);
6996 		break;
6997 	default:
6998 		break;
6999 	}
7000 
7001 	if (skb && !skb_in_use) {
7002 		BT_DBG("Freeing %p", skb);
7003 		kfree_skb(skb);
7004 	}
7005 
7006 	return err;
7007 }
7008 
7009 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7010 				    struct l2cap_ctrl *control,
7011 				    struct sk_buff *skb, u8 event)
7012 {
7013 	int err = 0;
7014 	u16 txseq = control->txseq;
7015 	bool skb_in_use = false;
7016 
7017 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7018 	       event);
7019 
7020 	switch (event) {
7021 	case L2CAP_EV_RECV_IFRAME:
7022 		switch (l2cap_classify_txseq(chan, txseq)) {
7023 		case L2CAP_TXSEQ_EXPECTED:
7024 			/* Keep frame for reassembly later */
7025 			l2cap_pass_to_tx(chan, control);
7026 			skb_queue_tail(&chan->srej_q, skb);
7027 			skb_in_use = true;
7028 			BT_DBG("Queued %p (queue len %d)", skb,
7029 			       skb_queue_len(&chan->srej_q));
7030 
7031 			chan->expected_tx_seq = __next_seq(chan, txseq);
7032 			break;
7033 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7034 			l2cap_seq_list_pop(&chan->srej_list);
7035 
7036 			l2cap_pass_to_tx(chan, control);
7037 			skb_queue_tail(&chan->srej_q, skb);
7038 			skb_in_use = true;
7039 			BT_DBG("Queued %p (queue len %d)", skb,
7040 			       skb_queue_len(&chan->srej_q));
7041 
7042 			err = l2cap_rx_queued_iframes(chan);
7043 			if (err)
7044 				break;
7045 
7046 			break;
7047 		case L2CAP_TXSEQ_UNEXPECTED:
7048 			/* Got a frame that can't be reassembled yet.
7049 			 * Save it for later, and send SREJs to cover
7050 			 * the missing frames.
7051 			 */
7052 			skb_queue_tail(&chan->srej_q, skb);
7053 			skb_in_use = true;
7054 			BT_DBG("Queued %p (queue len %d)", skb,
7055 			       skb_queue_len(&chan->srej_q));
7056 
7057 			l2cap_pass_to_tx(chan, control);
7058 			l2cap_send_srej(chan, control->txseq);
7059 			break;
7060 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7061 			/* This frame was requested with an SREJ, but
7062 			 * some expected retransmitted frames are
7063 			 * missing.  Request retransmission of missing
7064 			 * SREJ'd frames.
7065 			 */
7066 			skb_queue_tail(&chan->srej_q, skb);
7067 			skb_in_use = true;
7068 			BT_DBG("Queued %p (queue len %d)", skb,
7069 			       skb_queue_len(&chan->srej_q));
7070 
7071 			l2cap_pass_to_tx(chan, control);
7072 			l2cap_send_srej_list(chan, control->txseq);
7073 			break;
7074 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7075 			/* We've already queued this frame.  Drop this copy. */
7076 			l2cap_pass_to_tx(chan, control);
7077 			break;
7078 		case L2CAP_TXSEQ_DUPLICATE:
7079 			/* Expecting a later sequence number, so this frame
7080 			 * was already received.  Ignore it completely.
7081 			 */
7082 			break;
7083 		case L2CAP_TXSEQ_INVALID_IGNORE:
7084 			break;
7085 		case L2CAP_TXSEQ_INVALID:
7086 		default:
7087 			l2cap_send_disconn_req(chan, ECONNRESET);
7088 			break;
7089 		}
7090 		break;
7091 	case L2CAP_EV_RECV_RR:
7092 		l2cap_pass_to_tx(chan, control);
7093 		if (control->final) {
7094 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7095 
7096 			if (!test_and_clear_bit(CONN_REJ_ACT,
7097 						&chan->conn_state)) {
7098 				control->final = 0;
7099 				l2cap_retransmit_all(chan, control);
7100 			}
7101 
7102 			l2cap_ertm_send(chan);
7103 		} else if (control->poll) {
7104 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7105 					       &chan->conn_state) &&
7106 			    chan->unacked_frames) {
7107 				__set_retrans_timer(chan);
7108 			}
7109 
7110 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7111 			l2cap_send_srej_tail(chan);
7112 		} else {
7113 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7114 					       &chan->conn_state) &&
7115 			    chan->unacked_frames)
7116 				__set_retrans_timer(chan);
7117 
7118 			l2cap_send_ack(chan);
7119 		}
7120 		break;
7121 	case L2CAP_EV_RECV_RNR:
7122 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7123 		l2cap_pass_to_tx(chan, control);
7124 		if (control->poll) {
7125 			l2cap_send_srej_tail(chan);
7126 		} else {
7127 			struct l2cap_ctrl rr_control;
7128 			memset(&rr_control, 0, sizeof(rr_control));
7129 			rr_control.sframe = 1;
7130 			rr_control.super = L2CAP_SUPER_RR;
7131 			rr_control.reqseq = chan->buffer_seq;
7132 			l2cap_send_sframe(chan, &rr_control);
7133 		}
7134 
7135 		break;
7136 	case L2CAP_EV_RECV_REJ:
7137 		l2cap_handle_rej(chan, control);
7138 		break;
7139 	case L2CAP_EV_RECV_SREJ:
7140 		l2cap_handle_srej(chan, control);
7141 		break;
7142 	}
7143 
7144 	if (skb && !skb_in_use) {
7145 		BT_DBG("Freeing %p", skb);
7146 		kfree_skb(skb);
7147 	}
7148 
7149 	return err;
7150 }
7151 
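/* An AMP channel move has finished: go back to the normal receive
 * state and switch the connection MTU to that of the link (AMP or
 * BR/EDR ACL) now carrying the channel.
 */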
7152 static int l2cap_finish_move(struct l2cap_chan *chan)
7153 {
7154 	BT_DBG("chan %p", chan);
7155 
7156 	chan->rx_state = L2CAP_RX_STATE_RECV;
7157 
7158 	if (chan->hs_hcon)
7159 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7160 	else
7161 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7162 
7163 	return l2cap_resegment(chan);
7164 }
7165 
7166 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7167 				 struct l2cap_ctrl *control,
7168 				 struct sk_buff *skb, u8 event)
7169 {
7170 	int err;
7171 
7172 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7173 	       event);
7174 
7175 	if (!control->poll)
7176 		return -EPROTO;
7177 
7178 	l2cap_process_reqseq(chan, control->reqseq);
7179 
7180 	if (!skb_queue_empty(&chan->tx_q))
7181 		chan->tx_send_head = skb_peek(&chan->tx_q);
7182 	else
7183 		chan->tx_send_head = NULL;
7184 
7185 	/* Rewind next_tx_seq to the point expected
7186 	 * by the receiver.
7187 	 */
7188 	chan->next_tx_seq = control->reqseq;
7189 	chan->unacked_frames = 0;
7190 
7191 	err = l2cap_finish_move(chan);
7192 	if (err)
7193 		return err;
7194 
7195 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7196 	l2cap_send_i_or_rr_or_rnr(chan);
7197 
7198 	if (event == L2CAP_EV_RECV_IFRAME)
7199 		return -EPROTO;
7200 
7201 	return l2cap_rx_state_recv(chan, control, NULL, event);
7202 }
7203 
7204 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7205 				 struct l2cap_ctrl *control,
7206 				 struct sk_buff *skb, u8 event)
7207 {
7208 	int err;
7209 
7210 	if (!control->final)
7211 		return -EPROTO;
7212 
7213 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7214 
7215 	chan->rx_state = L2CAP_RX_STATE_RECV;
7216 	l2cap_process_reqseq(chan, control->reqseq);
7217 
7218 	if (!skb_queue_empty(&chan->tx_q))
7219 		chan->tx_send_head = skb_peek(&chan->tx_q);
7220 	else
7221 		chan->tx_send_head = NULL;
7222 
7223 	/* Rewind next_tx_seq to the point expected
7224 	 * by the receiver.
7225 	 */
7226 	chan->next_tx_seq = control->reqseq;
7227 	chan->unacked_frames = 0;
7228 
7229 	if (chan->hs_hcon)
7230 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7231 	else
7232 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7233 
7234 	err = l2cap_resegment(chan);
7235 
7236 	if (!err)
7237 		err = l2cap_rx_state_recv(chan, control, skb, event);
7238 
7239 	return err;
7240 }
7241 
7242 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7243 {
7244 	/* Make sure reqseq is for a packet that has been sent but not acked */
7245 	u16 unacked;
7246 
7247 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7248 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7249 }
7250 
7251 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7252 		    struct sk_buff *skb, u8 event)
7253 {
7254 	int err = 0;
7255 
7256 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7257 	       control, skb, event, chan->rx_state);
7258 
7259 	if (__valid_reqseq(chan, control->reqseq)) {
7260 		switch (chan->rx_state) {
7261 		case L2CAP_RX_STATE_RECV:
7262 			err = l2cap_rx_state_recv(chan, control, skb, event);
7263 			break;
7264 		case L2CAP_RX_STATE_SREJ_SENT:
7265 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7266 						       event);
7267 			break;
7268 		case L2CAP_RX_STATE_WAIT_P:
7269 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7270 			break;
7271 		case L2CAP_RX_STATE_WAIT_F:
7272 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7273 			break;
7274 		default:
7275 			/* shut it down */
7276 			break;
7277 		}
7278 	} else {
7279 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7280 		       control->reqseq, chan->next_tx_seq,
7281 		       chan->expected_ack_seq);
7282 		l2cap_send_disconn_req(chan, ECONNRESET);
7283 	}
7284 
7285 	return err;
7286 }
7287 
7288 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7289 			   struct sk_buff *skb)
7290 {
7291 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7292 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7293 	 * returns and to avoid the race condition, for example:
7294 	 *
7295 	 * The current thread calls:
7296 	 *   l2cap_reassemble_sdu
7297 	 *     chan->ops->recv == l2cap_sock_recv_cb
7298 	 *       __sock_queue_rcv_skb
7299 	 * Another thread calls:
7300 	 *   bt_sock_recvmsg
7301 	 *     skb_recv_datagram
7302 	 *     skb_free_datagram
7303 	 * Then the current thread tries to access control, but it was freed by
7304 	 * skb_free_datagram.
7305 	 */
7306 	u16 txseq = control->txseq;
7307 
7308 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7309 	       chan->rx_state);
7310 
7311 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7312 		l2cap_pass_to_tx(chan, control);
7313 
7314 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7315 		       __next_seq(chan, chan->buffer_seq));
7316 
7317 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7318 
7319 		l2cap_reassemble_sdu(chan, skb, control);
7320 	} else {
7321 		if (chan->sdu) {
7322 			kfree_skb(chan->sdu);
7323 			chan->sdu = NULL;
7324 		}
7325 		chan->sdu_last_frag = NULL;
7326 		chan->sdu_len = 0;
7327 
7328 		if (skb) {
7329 			BT_DBG("Freeing %p", skb);
7330 			kfree_skb(skb);
7331 		}
7332 	}
7333 
7334 	chan->last_acked_seq = txseq;
7335 	chan->expected_tx_seq = __next_seq(chan, txseq);
7336 
7337 	return 0;
7338 }
7339 
7340 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7341 {
7342 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7343 	u16 len;
7344 	u8 event;
7345 
7346 	__unpack_control(chan, skb);
7347 
7348 	len = skb->len;
7349 
7350 	/*
7351 	 * We can just drop the corrupted I-frame here.
7352 	 * Receiver will miss it and start proper recovery
7353 	 * procedures and ask for retransmission.
7354 	 */
7355 	if (l2cap_check_fcs(chan, skb))
7356 		goto drop;
7357 
7358 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7359 		len -= L2CAP_SDULEN_SIZE;
7360 
7361 	if (chan->fcs == L2CAP_FCS_CRC16)
7362 		len -= L2CAP_FCS_SIZE;
7363 
7364 	if (len > chan->mps) {
7365 		l2cap_send_disconn_req(chan, ECONNRESET);
7366 		goto drop;
7367 	}
7368 
7369 	if (chan->ops->filter) {
7370 		if (chan->ops->filter(chan, skb))
7371 			goto drop;
7372 	}
7373 
7374 	if (!control->sframe) {
7375 		int err;
7376 
7377 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7378 		       control->sar, control->reqseq, control->final,
7379 		       control->txseq);
7380 
7381 		/* Validate F-bit - F=0 always valid, F=1 only
7382 		 * valid in TX WAIT_F
7383 		 */
7384 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7385 			goto drop;
7386 
7387 		if (chan->mode != L2CAP_MODE_STREAMING) {
7388 			event = L2CAP_EV_RECV_IFRAME;
7389 			err = l2cap_rx(chan, control, skb, event);
7390 		} else {
7391 			err = l2cap_stream_rx(chan, control, skb);
7392 		}
7393 
7394 		if (err)
7395 			l2cap_send_disconn_req(chan, ECONNRESET);
7396 	} else {
7397 		const u8 rx_func_to_event[4] = {
7398 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7399 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7400 		};
7401 
7402 		/* Only I-frames are expected in streaming mode */
7403 		if (chan->mode == L2CAP_MODE_STREAMING)
7404 			goto drop;
7405 
7406 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7407 		       control->reqseq, control->final, control->poll,
7408 		       control->super);
7409 
7410 		if (len != 0) {
7411 			BT_ERR("Trailing bytes: %d in sframe", len);
7412 			l2cap_send_disconn_req(chan, ECONNRESET);
7413 			goto drop;
7414 		}
7415 
7416 		/* Validate F and P bits */
7417 		if (control->final && (control->poll ||
7418 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7419 			goto drop;
7420 
7421 		event = rx_func_to_event[control->super];
7422 		if (l2cap_rx(chan, control, skb, event))
7423 			l2cap_send_disconn_req(chan, ECONNRESET);
7424 	}
7425 
7426 	return 0;
7427 
7428 drop:
7429 	kfree_skb(skb);
7430 	return 0;
7431 }
7432 
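/* Return credits to the remote device on an LE or ECRED channel: when
 * rx_credits drops below (imtu / mps) + 1, send an L2CAP_LE_CREDITS
 * packet that tops the count back up to that threshold.
 */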
7433 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7434 {
7435 	struct l2cap_conn *conn = chan->conn;
7436 	struct l2cap_le_credits pkt;
7437 	u16 return_credits;
7438 
7439 	return_credits = (chan->imtu / chan->mps) + 1;
7440 
7441 	if (chan->rx_credits >= return_credits)
7442 		return;
7443 
7444 	return_credits -= chan->rx_credits;
7445 
7446 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7447 
7448 	chan->rx_credits += return_credits;
7449 
7450 	pkt.cid     = cpu_to_le16(chan->scid);
7451 	pkt.credits = cpu_to_le16(return_credits);
7452 
7453 	chan->ident = l2cap_get_ident(conn);
7454 
7455 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7456 }
7457 
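/* Hand a fully reassembled SDU to the channel owner, then replenish the
 * remote device's credits.
 */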
7458 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7459 {
7460 	int err;
7461 
7462 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7463 
7464 	/* Wait for recv to confirm reception before updating the credits */
7465 	err = chan->ops->recv(chan, skb);
7466 
7467 	/* Update credits whenever an SDU is received */
7468 	l2cap_chan_le_send_credits(chan);
7469 
7470 	return err;
7471 }
7472 
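/* Credit-based receive path shared by LE and enhanced credit flow control
 * modes: each PDU consumes one credit, the first fragment of an SDU
 * carries the SDU length, and oversized or unexpected data tears down the
 * partial SDU.
 */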
7473 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7474 {
7475 	int err;
7476 
7477 	if (!chan->rx_credits) {
7478 		BT_ERR("No credits to receive LE L2CAP data");
7479 		l2cap_send_disconn_req(chan, ECONNRESET);
7480 		return -ENOBUFS;
7481 	}
7482 
7483 	if (chan->imtu < skb->len) {
7484 		BT_ERR("Too big LE L2CAP PDU");
7485 		return -ENOBUFS;
7486 	}
7487 
7488 	chan->rx_credits--;
7489 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7490 
7491 	/* Update if the remote has run out of credits; this should only happen
7492 	 * if the remote is not using the entire MPS.
7493 	 */
7494 	if (!chan->rx_credits)
7495 		l2cap_chan_le_send_credits(chan);
7496 
7497 	err = 0;
7498 
7499 	if (!chan->sdu) {
7500 		u16 sdu_len;
7501 
7502 		sdu_len = get_unaligned_le16(skb->data);
7503 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7504 
7505 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7506 		       sdu_len, skb->len, chan->imtu);
7507 
7508 		if (sdu_len > chan->imtu) {
7509 			BT_ERR("Too big LE L2CAP SDU length received");
7510 			err = -EMSGSIZE;
7511 			goto failed;
7512 		}
7513 
7514 		if (skb->len > sdu_len) {
7515 			BT_ERR("Too much LE L2CAP data received");
7516 			err = -EINVAL;
7517 			goto failed;
7518 		}
7519 
7520 		if (skb->len == sdu_len)
7521 			return l2cap_ecred_recv(chan, skb);
7522 
7523 		chan->sdu = skb;
7524 		chan->sdu_len = sdu_len;
7525 		chan->sdu_last_frag = skb;
7526 
7527 		/* Detect if remote is not able to use the selected MPS */
7528 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7529 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7530 
7531 			/* Adjust the number of credits */
7532 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7533 			chan->mps = mps_len;
7534 			l2cap_chan_le_send_credits(chan);
7535 		}
7536 
7537 		return 0;
7538 	}
7539 
7540 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7541 	       chan->sdu->len, skb->len, chan->sdu_len);
7542 
7543 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7544 		BT_ERR("Too much LE L2CAP data received");
7545 		err = -EINVAL;
7546 		goto failed;
7547 	}
7548 
7549 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7550 	skb = NULL;
7551 
7552 	if (chan->sdu->len == chan->sdu_len) {
7553 		err = l2cap_ecred_recv(chan, chan->sdu);
7554 		if (!err) {
7555 			chan->sdu = NULL;
7556 			chan->sdu_last_frag = NULL;
7557 			chan->sdu_len = 0;
7558 		}
7559 	}
7560 
7561 failed:
7562 	if (err) {
7563 		kfree_skb(skb);
7564 		kfree_skb(chan->sdu);
7565 		chan->sdu = NULL;
7566 		chan->sdu_last_frag = NULL;
7567 		chan->sdu_len = 0;
7568 	}
7569 
7570 	/* We can't return an error here since we took care of the skb
7571 	 * freeing internally. An error return would cause the caller to
7572 	 * do a double-free of the skb.
7573 	 */
7574 	return 0;
7575 }
7576 
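/* Route an incoming data PDU to the channel matching the destination CID
 * and process it according to the channel mode (credit based, basic, ERTM
 * or streaming).
 */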
7577 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7578 			       struct sk_buff *skb)
7579 {
7580 	struct l2cap_chan *chan;
7581 
7582 	chan = l2cap_get_chan_by_scid(conn, cid);
7583 	if (!chan) {
7584 		if (cid == L2CAP_CID_A2MP) {
7585 			chan = a2mp_channel_create(conn, skb);
7586 			if (!chan) {
7587 				kfree_skb(skb);
7588 				return;
7589 			}
7590 
7591 			l2cap_chan_lock(chan);
7592 		} else {
7593 			BT_DBG("unknown cid 0x%4.4x", cid);
7594 			/* Drop packet and return */
7595 			kfree_skb(skb);
7596 			return;
7597 		}
7598 	}
7599 
7600 	BT_DBG("chan %p, len %d", chan, skb->len);
7601 
7602 	/* If we receive data on a fixed channel before the info req/rsp
7603 	 * procedure is done, simply assume that the channel is supported
7604 	 * and mark it as ready.
7605 	 */
7606 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7607 		l2cap_chan_ready(chan);
7608 
7609 	if (chan->state != BT_CONNECTED)
7610 		goto drop;
7611 
7612 	switch (chan->mode) {
7613 	case L2CAP_MODE_LE_FLOWCTL:
7614 	case L2CAP_MODE_EXT_FLOWCTL:
7615 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7616 			goto drop;
7617 
7618 		goto done;
7619 
7620 	case L2CAP_MODE_BASIC:
7621 		/* If the socket recv buffer overflows we drop data here,
7622 		 * which is *bad* because L2CAP has to be reliable.
7623 		 * But we don't have any other choice: basic mode L2CAP
7624 		 * doesn't provide a flow control mechanism. */
7625 
7626 		if (chan->imtu < skb->len) {
7627 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7628 			goto drop;
7629 		}
7630 
7631 		if (!chan->ops->recv(chan, skb))
7632 			goto done;
7633 		break;
7634 
7635 	case L2CAP_MODE_ERTM:
7636 	case L2CAP_MODE_STREAMING:
7637 		l2cap_data_rcv(chan, skb);
7638 		goto done;
7639 
7640 	default:
7641 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7642 		break;
7643 	}
7644 
7645 drop:
7646 	kfree_skb(skb);
7647 
7648 done:
7649 	l2cap_chan_unlock(chan);
7650 }
7651 
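/* Handle a connectionless (G-frame) PDU: look up the global channel bound
 * to the given PSM and deliver the payload to it, recording the remote
 * BD_ADDR and PSM for msg_name.
 */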
7652 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7653 				  struct sk_buff *skb)
7654 {
7655 	struct hci_conn *hcon = conn->hcon;
7656 	struct l2cap_chan *chan;
7657 
7658 	if (hcon->type != ACL_LINK)
7659 		goto free_skb;
7660 
7661 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7662 					ACL_LINK);
7663 	if (!chan)
7664 		goto free_skb;
7665 
7666 	BT_DBG("chan %p, len %d", chan, skb->len);
7667 
7668 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7669 		goto drop;
7670 
7671 	if (chan->imtu < skb->len)
7672 		goto drop;
7673 
7674 	/* Store remote BD_ADDR and PSM for msg_name */
7675 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7676 	bt_cb(skb)->l2cap.psm = psm;
7677 
7678 	if (!chan->ops->recv(chan, skb)) {
7679 		l2cap_chan_put(chan);
7680 		return;
7681 	}
7682 
7683 drop:
7684 	l2cap_chan_put(chan);
7685 free_skb:
7686 	kfree_skb(skb);
7687 }
7688 
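/* Demultiplex a complete L2CAP frame by CID to the signalling,
 * connectionless or data channel handlers. Frames that arrive before the
 * HCI connection reaches BT_CONNECTED are queued on pending_rx.
 */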
7689 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7690 {
7691 	struct l2cap_hdr *lh = (void *) skb->data;
7692 	struct hci_conn *hcon = conn->hcon;
7693 	u16 cid, len;
7694 	__le16 psm;
7695 
7696 	if (hcon->state != BT_CONNECTED) {
7697 		BT_DBG("queueing pending rx skb");
7698 		skb_queue_tail(&conn->pending_rx, skb);
7699 		return;
7700 	}
7701 
7702 	skb_pull(skb, L2CAP_HDR_SIZE);
7703 	cid = __le16_to_cpu(lh->cid);
7704 	len = __le16_to_cpu(lh->len);
7705 
7706 	if (len != skb->len) {
7707 		kfree_skb(skb);
7708 		return;
7709 	}
7710 
7711 	/* Since we can't actively block incoming LE connections we must
7712 	 * at least ensure that we ignore incoming data from them.
7713 	 */
7714 	if (hcon->type == LE_LINK &&
7715 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7716 				   bdaddr_dst_type(hcon))) {
7717 		kfree_skb(skb);
7718 		return;
7719 	}
7720 
7721 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7722 
7723 	switch (cid) {
7724 	case L2CAP_CID_SIGNALING:
7725 		l2cap_sig_channel(conn, skb);
7726 		break;
7727 
7728 	case L2CAP_CID_CONN_LESS:
7729 		psm = get_unaligned((__le16 *) skb->data);
7730 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7731 		l2cap_conless_channel(conn, psm, skb);
7732 		break;
7733 
7734 	case L2CAP_CID_LE_SIGNALING:
7735 		l2cap_le_sig_channel(conn, skb);
7736 		break;
7737 
7738 	default:
7739 		l2cap_data_channel(conn, cid, skb);
7740 		break;
7741 	}
7742 }
7743 
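/* Work item that drains conn->pending_rx once the HCI connection has been
 * fully established.
 */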
7744 static void process_pending_rx(struct work_struct *work)
7745 {
7746 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7747 					       pending_rx_work);
7748 	struct sk_buff *skb;
7749 
7750 	BT_DBG("");
7751 
7752 	while ((skb = skb_dequeue(&conn->pending_rx)))
7753 		l2cap_recv_frame(conn, skb);
7754 }
7755 
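/* Create (or return the existing) struct l2cap_conn for an HCI
 * connection, setting up the MTU, the local fixed channel mask and the
 * receive work items.
 */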
7756 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7757 {
7758 	struct l2cap_conn *conn = hcon->l2cap_data;
7759 	struct hci_chan *hchan;
7760 
7761 	if (conn)
7762 		return conn;
7763 
7764 	hchan = hci_chan_create(hcon);
7765 	if (!hchan)
7766 		return NULL;
7767 
7768 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7769 	if (!conn) {
7770 		hci_chan_del(hchan);
7771 		return NULL;
7772 	}
7773 
7774 	kref_init(&conn->ref);
7775 	hcon->l2cap_data = conn;
7776 	conn->hcon = hci_conn_get(hcon);
7777 	conn->hchan = hchan;
7778 
7779 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7780 
7781 	switch (hcon->type) {
7782 	case LE_LINK:
7783 		if (hcon->hdev->le_mtu) {
7784 			conn->mtu = hcon->hdev->le_mtu;
7785 			break;
7786 		}
7787 		fallthrough;
7788 	default:
7789 		conn->mtu = hcon->hdev->acl_mtu;
7790 		break;
7791 	}
7792 
7793 	conn->feat_mask = 0;
7794 
7795 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7796 
7797 	if (hcon->type == ACL_LINK &&
7798 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7799 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7800 
7801 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7802 	    (bredr_sc_enabled(hcon->hdev) ||
7803 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7804 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7805 
7806 	mutex_init(&conn->ident_lock);
7807 	mutex_init(&conn->chan_lock);
7808 
7809 	INIT_LIST_HEAD(&conn->chan_l);
7810 	INIT_LIST_HEAD(&conn->users);
7811 
7812 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7813 
7814 	skb_queue_head_init(&conn->pending_rx);
7815 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7816 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7817 
7818 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7819 
7820 	return conn;
7821 }
7822 
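/* LE PSMs are limited to 0x0001-0x00ff; BR/EDR PSMs must be odd with the
 * least significant bit of the upper octet set to zero.
 */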
7823 static bool is_valid_psm(u16 psm, u8 dst_type) {
7824 	if (!psm)
7825 		return false;
7826 
7827 	if (bdaddr_type_is_le(dst_type))
7828 		return (psm <= 0x00ff);
7829 
7830 	/* PSM must be odd and lsb of upper byte must be 0 */
7831 	return ((psm & 0x0101) == 0x0001);
7832 }
7833 
7834 struct l2cap_chan_data {
7835 	struct l2cap_chan *chan;
7836 	struct pid *pid;
7837 	int count;
7838 };
7839 
7840 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7841 {
7842 	struct l2cap_chan_data *d = data;
7843 	struct pid *pid;
7844 
7845 	if (chan == d->chan)
7846 		return;
7847 
7848 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7849 		return;
7850 
7851 	pid = chan->ops->get_peer_pid(chan);
7852 
7853 	/* Only count deferred channels with the same PID/PSM */
7854 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7855 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7856 		return;
7857 
7858 	d->count++;
7859 }
7860 
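/* Initiate an outgoing connection on a channel: validate the PSM/CID and
 * channel mode, create or reuse the underlying ACL or LE link, attach the
 * channel to the resulting l2cap_conn and start channel establishment.
 */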
7861 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7862 		       bdaddr_t *dst, u8 dst_type)
7863 {
7864 	struct l2cap_conn *conn;
7865 	struct hci_conn *hcon;
7866 	struct hci_dev *hdev;
7867 	int err;
7868 
7869 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7870 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7871 
7872 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7873 	if (!hdev)
7874 		return -EHOSTUNREACH;
7875 
7876 	hci_dev_lock(hdev);
7877 
7878 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7879 	    chan->chan_type != L2CAP_CHAN_RAW) {
7880 		err = -EINVAL;
7881 		goto done;
7882 	}
7883 
7884 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7885 		err = -EINVAL;
7886 		goto done;
7887 	}
7888 
7889 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7890 		err = -EINVAL;
7891 		goto done;
7892 	}
7893 
7894 	switch (chan->mode) {
7895 	case L2CAP_MODE_BASIC:
7896 		break;
7897 	case L2CAP_MODE_LE_FLOWCTL:
7898 		break;
7899 	case L2CAP_MODE_EXT_FLOWCTL:
7900 		if (!enable_ecred) {
7901 			err = -EOPNOTSUPP;
7902 			goto done;
7903 		}
7904 		break;
7905 	case L2CAP_MODE_ERTM:
7906 	case L2CAP_MODE_STREAMING:
7907 		if (!disable_ertm)
7908 			break;
7909 		fallthrough;
7910 	default:
7911 		err = -EOPNOTSUPP;
7912 		goto done;
7913 	}
7914 
7915 	switch (chan->state) {
7916 	case BT_CONNECT:
7917 	case BT_CONNECT2:
7918 	case BT_CONFIG:
7919 		/* Already connecting */
7920 		err = 0;
7921 		goto done;
7922 
7923 	case BT_CONNECTED:
7924 		/* Already connected */
7925 		err = -EISCONN;
7926 		goto done;
7927 
7928 	case BT_OPEN:
7929 	case BT_BOUND:
7930 		/* Can connect */
7931 		break;
7932 
7933 	default:
7934 		err = -EBADFD;
7935 		goto done;
7936 	}
7937 
7938 	/* Set destination address and psm */
7939 	bacpy(&chan->dst, dst);
7940 	chan->dst_type = dst_type;
7941 
7942 	chan->psm = psm;
7943 	chan->dcid = cid;
7944 
7945 	if (bdaddr_type_is_le(dst_type)) {
7946 		/* Convert from L2CAP channel address type to HCI address type
7947 		 */
7948 		if (dst_type == BDADDR_LE_PUBLIC)
7949 			dst_type = ADDR_LE_DEV_PUBLIC;
7950 		else
7951 			dst_type = ADDR_LE_DEV_RANDOM;
7952 
7953 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7954 			hcon = hci_connect_le(hdev, dst, dst_type,
7955 					      chan->sec_level,
7956 					      HCI_LE_CONN_TIMEOUT,
7957 					      HCI_ROLE_SLAVE, NULL);
7958 		else
7959 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7960 						   chan->sec_level,
7961 						   HCI_LE_CONN_TIMEOUT,
7962 						   CONN_REASON_L2CAP_CHAN);
7963 
7964 	} else {
7965 		u8 auth_type = l2cap_get_auth_type(chan);
7966 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7967 				       CONN_REASON_L2CAP_CHAN);
7968 	}
7969 
7970 	if (IS_ERR(hcon)) {
7971 		err = PTR_ERR(hcon);
7972 		goto done;
7973 	}
7974 
7975 	conn = l2cap_conn_add(hcon);
7976 	if (!conn) {
7977 		hci_conn_drop(hcon);
7978 		err = -ENOMEM;
7979 		goto done;
7980 	}
7981 
7982 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7983 		struct l2cap_chan_data data;
7984 
7985 		data.chan = chan;
7986 		data.pid = chan->ops->get_peer_pid(chan);
7987 		data.count = 1;
7988 
7989 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7990 
7991 		/* Check that there aren't too many channels being connected */
7992 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7993 			hci_conn_drop(hcon);
7994 			err = -EPROTO;
7995 			goto done;
7996 		}
7997 	}
7998 
7999 	mutex_lock(&conn->chan_lock);
8000 	l2cap_chan_lock(chan);
8001 
8002 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8003 		hci_conn_drop(hcon);
8004 		err = -EBUSY;
8005 		goto chan_unlock;
8006 	}
8007 
8008 	/* Update source addr of the socket */
8009 	bacpy(&chan->src, &hcon->src);
8010 	chan->src_type = bdaddr_src_type(hcon);
8011 
8012 	__l2cap_chan_add(conn, chan);
8013 
8014 	/* l2cap_chan_add takes its own ref so we can drop this one */
8015 	hci_conn_drop(hcon);
8016 
8017 	l2cap_state_change(chan, BT_CONNECT);
8018 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8019 
8020 	/* Release chan->sport so that it can be reused by other
8021 	 * sockets (as it's only used for listening sockets).
8022 	 */
8023 	write_lock(&chan_list_lock);
8024 	chan->sport = 0;
8025 	write_unlock(&chan_list_lock);
8026 
8027 	if (hcon->state == BT_CONNECTED) {
8028 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8029 			__clear_chan_timer(chan);
8030 			if (l2cap_chan_check_security(chan, true))
8031 				l2cap_state_change(chan, BT_CONNECTED);
8032 		} else
8033 			l2cap_do_start(chan);
8034 	}
8035 
8036 	err = 0;
8037 
8038 chan_unlock:
8039 	l2cap_chan_unlock(chan);
8040 	mutex_unlock(&conn->chan_lock);
8041 done:
8042 	hci_dev_unlock(hdev);
8043 	hci_dev_put(hdev);
8044 	return err;
8045 }
8046 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8047 
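/* Send an ECRED reconfigure request advertising the channel's current MTU
 * and MPS to the remote device.
 */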
8048 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8049 {
8050 	struct l2cap_conn *conn = chan->conn;
8051 	struct {
8052 		struct l2cap_ecred_reconf_req req;
8053 		__le16 scid;
8054 	} pdu;
8055 
8056 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8057 	pdu.req.mps = cpu_to_le16(chan->mps);
8058 	pdu.scid    = cpu_to_le16(chan->scid);
8059 
8060 	chan->ident = l2cap_get_ident(conn);
8061 
8062 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8063 		       sizeof(pdu), &pdu);
8064 }
8065 
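/* Update the local MTU of an enhanced credit based channel (the MTU may
 * only grow) and notify the remote device via an ECRED reconfigure
 * request.
 */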
8066 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8067 {
8068 	if (chan->imtu > mtu)
8069 		return -EINVAL;
8070 
8071 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8072 
8073 	chan->imtu = mtu;
8074 
8075 	l2cap_ecred_reconfigure(chan);
8076 
8077 	return 0;
8078 }
8079 
8080 /* ---- L2CAP interface with lower layer (HCI) ---- */
8081 
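/* Incoming ACL connection indication from HCI: accept if a listening
 * channel matches the local address, preferring exact matches over
 * wildcard (BDADDR_ANY) listeners, and report the required link mode.
 */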
8082 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8083 {
8084 	int exact = 0, lm1 = 0, lm2 = 0;
8085 	struct l2cap_chan *c;
8086 
8087 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8088 
8089 	/* Find listening sockets and check their link_mode */
8090 	read_lock(&chan_list_lock);
8091 	list_for_each_entry(c, &chan_list, global_l) {
8092 		if (c->state != BT_LISTEN)
8093 			continue;
8094 
8095 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8096 			lm1 |= HCI_LM_ACCEPT;
8097 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8098 				lm1 |= HCI_LM_MASTER;
8099 			exact++;
8100 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8101 			lm2 |= HCI_LM_ACCEPT;
8102 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8103 				lm2 |= HCI_LM_MASTER;
8104 		}
8105 	}
8106 	read_unlock(&chan_list_lock);
8107 
8108 	return exact ? lm1 : lm2;
8109 }
8110 
8111 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
8112  * from an existing channel in the list or from the beginning of the
8113  * global list (by passing NULL as the first parameter).
8114  */
8115 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8116 						  struct hci_conn *hcon)
8117 {
8118 	u8 src_type = bdaddr_src_type(hcon);
8119 
8120 	read_lock(&chan_list_lock);
8121 
8122 	if (c)
8123 		c = list_next_entry(c, global_l);
8124 	else
8125 		c = list_entry(chan_list.next, typeof(*c), global_l);
8126 
8127 	list_for_each_entry_from(c, &chan_list, global_l) {
8128 		if (c->chan_type != L2CAP_CHAN_FIXED)
8129 			continue;
8130 		if (c->state != BT_LISTEN)
8131 			continue;
8132 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8133 			continue;
8134 		if (src_type != c->src_type)
8135 			continue;
8136 
8137 		l2cap_chan_hold(c);
8138 		read_unlock(&chan_list_lock);
8139 		return c;
8140 	}
8141 
8142 	read_unlock(&chan_list_lock);
8143 
8144 	return NULL;
8145 }
8146 
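/* HCI callback invoked when an ACL or LE link is established: create the
 * L2CAP connection, spawn channels for listening fixed channel servers
 * and mark the connection ready.
 */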
8147 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8148 {
8149 	struct hci_dev *hdev = hcon->hdev;
8150 	struct l2cap_conn *conn;
8151 	struct l2cap_chan *pchan;
8152 	u8 dst_type;
8153 
8154 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8155 		return;
8156 
8157 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8158 
8159 	if (status) {
8160 		l2cap_conn_del(hcon, bt_to_errno(status));
8161 		return;
8162 	}
8163 
8164 	conn = l2cap_conn_add(hcon);
8165 	if (!conn)
8166 		return;
8167 
8168 	dst_type = bdaddr_dst_type(hcon);
8169 
8170 	/* If device is blocked, do not create channels for it */
8171 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8172 		return;
8173 
8174 	/* Find fixed channels and notify them of the new connection. We
8175 	 * use multiple individual lookups, continuing each time where
8176 	 * we left off, because the list lock would prevent calling the
8177 	 * potentially sleeping l2cap_chan_lock() function.
8178 	 */
8179 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8180 	while (pchan) {
8181 		struct l2cap_chan *chan, *next;
8182 
8183 		/* Client fixed channels should override server ones */
8184 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8185 			goto next;
8186 
8187 		l2cap_chan_lock(pchan);
8188 		chan = pchan->ops->new_connection(pchan);
8189 		if (chan) {
8190 			bacpy(&chan->src, &hcon->src);
8191 			bacpy(&chan->dst, &hcon->dst);
8192 			chan->src_type = bdaddr_src_type(hcon);
8193 			chan->dst_type = dst_type;
8194 
8195 			__l2cap_chan_add(conn, chan);
8196 		}
8197 
8198 		l2cap_chan_unlock(pchan);
8199 next:
8200 		next = l2cap_global_fixed_chan(pchan, hcon);
8201 		l2cap_chan_put(pchan);
8202 		pchan = next;
8203 	}
8204 
8205 	l2cap_conn_ready(conn);
8206 }
8207 
8208 int l2cap_disconn_ind(struct hci_conn *hcon)
8209 {
8210 	struct l2cap_conn *conn = hcon->l2cap_data;
8211 
8212 	BT_DBG("hcon %p", hcon);
8213 
8214 	if (!conn)
8215 		return HCI_ERROR_REMOTE_USER_TERM;
8216 	return conn->disc_reason;
8217 }
8218 
8219 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8220 {
8221 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8222 		return;
8223 
8224 	BT_DBG("hcon %p reason %d", hcon, reason);
8225 
8226 	l2cap_conn_del(hcon, bt_to_errno(reason));
8227 }
8228 
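/* React to encryption changes on a connection-oriented channel: start the
 * encryption timer or close the channel when encryption is lost,
 * depending on the required security level.
 */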
8229 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8230 {
8231 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8232 		return;
8233 
8234 	if (encrypt == 0x00) {
8235 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8236 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8237 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8238 			   chan->sec_level == BT_SECURITY_FIPS)
8239 			l2cap_chan_close(chan, ECONNREFUSED);
8240 	} else {
8241 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8242 			__clear_chan_timer(chan);
8243 	}
8244 }
8245 
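/* HCI security callback: propagate the result of authentication and
 * encryption changes to every channel on the connection, resuming,
 * connecting or rejecting them as appropriate.
 */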
8246 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8247 {
8248 	struct l2cap_conn *conn = hcon->l2cap_data;
8249 	struct l2cap_chan *chan;
8250 
8251 	if (!conn)
8252 		return;
8253 
8254 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8255 
8256 	mutex_lock(&conn->chan_lock);
8257 
8258 	list_for_each_entry(chan, &conn->chan_l, list) {
8259 		l2cap_chan_lock(chan);
8260 
8261 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8262 		       state_to_string(chan->state));
8263 
8264 		if (chan->scid == L2CAP_CID_A2MP) {
8265 			l2cap_chan_unlock(chan);
8266 			continue;
8267 		}
8268 
8269 		if (!status && encrypt)
8270 			chan->sec_level = hcon->sec_level;
8271 
8272 		if (!__l2cap_no_conn_pending(chan)) {
8273 			l2cap_chan_unlock(chan);
8274 			continue;
8275 		}
8276 
8277 		if (!status && (chan->state == BT_CONNECTED ||
8278 				chan->state == BT_CONFIG)) {
8279 			chan->ops->resume(chan);
8280 			l2cap_check_encryption(chan, encrypt);
8281 			l2cap_chan_unlock(chan);
8282 			continue;
8283 		}
8284 
8285 		if (chan->state == BT_CONNECT) {
8286 			if (!status && l2cap_check_enc_key_size(hcon))
8287 				l2cap_start_connection(chan);
8288 			else
8289 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8290 		} else if (chan->state == BT_CONNECT2 &&
8291 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8292 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8293 			struct l2cap_conn_rsp rsp;
8294 			__u16 res, stat;
8295 
8296 			if (!status && l2cap_check_enc_key_size(hcon)) {
8297 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8298 					res = L2CAP_CR_PEND;
8299 					stat = L2CAP_CS_AUTHOR_PEND;
8300 					chan->ops->defer(chan);
8301 				} else {
8302 					l2cap_state_change(chan, BT_CONFIG);
8303 					res = L2CAP_CR_SUCCESS;
8304 					stat = L2CAP_CS_NO_INFO;
8305 				}
8306 			} else {
8307 				l2cap_state_change(chan, BT_DISCONN);
8308 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8309 				res = L2CAP_CR_SEC_BLOCK;
8310 				stat = L2CAP_CS_NO_INFO;
8311 			}
8312 
8313 			rsp.scid   = cpu_to_le16(chan->dcid);
8314 			rsp.dcid   = cpu_to_le16(chan->scid);
8315 			rsp.result = cpu_to_le16(res);
8316 			rsp.status = cpu_to_le16(stat);
8317 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8318 				       sizeof(rsp), &rsp);
8319 
8320 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8321 			    res == L2CAP_CR_SUCCESS) {
8322 				char buf[128];
8323 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8324 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8325 					       L2CAP_CONF_REQ,
8326 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8327 					       buf);
8328 				chan->num_conf_req++;
8329 			}
8330 		}
8331 
8332 		l2cap_chan_unlock(chan);
8333 	}
8334 
8335 	mutex_unlock(&conn->chan_lock);
8336 }
8337 
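/* Entry point for ACL data from the HCI core: reassemble fragmented L2CAP
 * frames using the length from the basic header and hand complete frames
 * to l2cap_recv_frame().
 */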
8338 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8339 {
8340 	struct l2cap_conn *conn = hcon->l2cap_data;
8341 	struct l2cap_hdr *hdr;
8342 	int len;
8343 
8344 	/* For an AMP controller do not create an l2cap conn */
8345 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8346 		goto drop;
8347 
8348 	if (!conn)
8349 		conn = l2cap_conn_add(hcon);
8350 
8351 	if (!conn)
8352 		goto drop;
8353 
8354 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8355 
8356 	switch (flags) {
8357 	case ACL_START:
8358 	case ACL_START_NO_FLUSH:
8359 	case ACL_COMPLETE:
8360 		if (conn->rx_len) {
8361 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8362 			kfree_skb(conn->rx_skb);
8363 			conn->rx_skb = NULL;
8364 			conn->rx_len = 0;
8365 			l2cap_conn_unreliable(conn, ECOMM);
8366 		}
8367 
8368 		/* A start fragment always begins with the Basic L2CAP header */
8369 		if (skb->len < L2CAP_HDR_SIZE) {
8370 			BT_ERR("Frame is too short (len %d)", skb->len);
8371 			l2cap_conn_unreliable(conn, ECOMM);
8372 			goto drop;
8373 		}
8374 
8375 		hdr = (struct l2cap_hdr *) skb->data;
8376 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8377 
8378 		if (len == skb->len) {
8379 			/* Complete frame received */
8380 			l2cap_recv_frame(conn, skb);
8381 			return;
8382 		}
8383 
8384 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8385 
8386 		if (skb->len > len) {
8387 			BT_ERR("Frame is too long (len %d, expected len %d)",
8388 			       skb->len, len);
8389 			l2cap_conn_unreliable(conn, ECOMM);
8390 			goto drop;
8391 		}
8392 
8393 		/* Allocate skb for the complete frame (with header) */
8394 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8395 		if (!conn->rx_skb)
8396 			goto drop;
8397 
8398 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8399 					  skb->len);
8400 		conn->rx_len = len - skb->len;
8401 		break;
8402 
8403 	case ACL_CONT:
8404 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8405 
8406 		if (!conn->rx_len) {
8407 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8408 			l2cap_conn_unreliable(conn, ECOMM);
8409 			goto drop;
8410 		}
8411 
8412 		if (skb->len > conn->rx_len) {
8413 			BT_ERR("Fragment is too long (len %d, expected %d)",
8414 			       skb->len, conn->rx_len);
8415 			kfree_skb(conn->rx_skb);
8416 			conn->rx_skb = NULL;
8417 			conn->rx_len = 0;
8418 			l2cap_conn_unreliable(conn, ECOMM);
8419 			goto drop;
8420 		}
8421 
8422 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8423 					  skb->len);
8424 		conn->rx_len -= skb->len;
8425 
8426 		if (!conn->rx_len) {
8427 			/* Complete frame received. l2cap_recv_frame
8428 			 * takes ownership of the skb, so clear the
8429 			 * per-connection rx_skb pointer first.
8430 			 */
8431 			struct sk_buff *rx_skb = conn->rx_skb;
8432 			conn->rx_skb = NULL;
8433 			l2cap_recv_frame(conn, rx_skb);
8434 		}
8435 		break;
8436 	}
8437 
8438 drop:
8439 	kfree_skb(skb);
8440 }
8441 
8442 static struct hci_cb l2cap_cb = {
8443 	.name		= "L2CAP",
8444 	.connect_cfm	= l2cap_connect_cfm,
8445 	.disconn_cfm	= l2cap_disconn_cfm,
8446 	.security_cfm	= l2cap_security_cfm,
8447 };
8448 
8449 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8450 {
8451 	struct l2cap_chan *c;
8452 
8453 	read_lock(&chan_list_lock);
8454 
8455 	list_for_each_entry(c, &chan_list, global_l) {
8456 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8457 			   &c->src, c->src_type, &c->dst, c->dst_type,
8458 			   c->state, __le16_to_cpu(c->psm),
8459 			   c->scid, c->dcid, c->imtu, c->omtu,
8460 			   c->sec_level, c->mode);
8461 	}
8462 
8463 	read_unlock(&chan_list_lock);
8464 
8465 	return 0;
8466 }
8467 
8468 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8469 
8470 static struct dentry *l2cap_debugfs;
8471 
8472 int __init l2cap_init(void)
8473 {
8474 	int err;
8475 
8476 	err = l2cap_init_sockets();
8477 	if (err < 0)
8478 		return err;
8479 
8480 	hci_register_cb(&l2cap_cb);
8481 
8482 	if (IS_ERR_OR_NULL(bt_debugfs))
8483 		return 0;
8484 
8485 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8486 					    NULL, &l2cap_debugfs_fops);
8487 
8488 	return 0;
8489 }
8490 
8491 void l2cap_exit(void)
8492 {
8493 	debugfs_remove(l2cap_debugfs);
8494 	hci_unregister_cb(&l2cap_cb);
8495 	l2cap_cleanup_sockets();
8496 }
8497 
8498 module_param(disable_ertm, bool, 0644);
8499 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8500 
8501 module_param(enable_ecred, bool, 0644);
8502 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8503