1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns a reference locked channel.
115  */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 						 u16 cid)
118 {
119 	struct l2cap_chan *c;
120 
121 	mutex_lock(&conn->chan_lock);
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 	mutex_unlock(&conn->chan_lock);
130 
131 	return c;
132 }
133 
134 /* Find channel with given DCID.
135  * Returns a reference locked channel.
136  */
137 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
138 						 u16 cid)
139 {
140 	struct l2cap_chan *c;
141 
142 	mutex_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_dcid(conn, cid);
144 	if (c) {
145 		/* Only lock if chan reference is not 0 */
146 		c = l2cap_chan_hold_unless_zero(c);
147 		if (c)
148 			l2cap_chan_lock(c);
149 	}
150 	mutex_unlock(&conn->chan_lock);
151 
152 	return c;
153 }
154 
155 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
156 						    u8 ident)
157 {
158 	struct l2cap_chan *c;
159 
160 	list_for_each_entry(c, &conn->chan_l, list) {
161 		if (c->ident == ident)
162 			return c;
163 	}
164 	return NULL;
165 }
166 
167 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
168 						  u8 ident)
169 {
170 	struct l2cap_chan *c;
171 
172 	mutex_lock(&conn->chan_lock);
173 	c = __l2cap_get_chan_by_ident(conn, ident);
174 	if (c) {
175 		/* Only lock if chan reference is not 0 */
176 		c = l2cap_chan_hold_unless_zero(c);
177 		if (c)
178 			l2cap_chan_lock(c);
179 	}
180 	mutex_unlock(&conn->chan_lock);
181 
182 	return c;
183 }
184 
185 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
186 						      u8 src_type)
187 {
188 	struct l2cap_chan *c;
189 
190 	list_for_each_entry(c, &chan_list, global_l) {
191 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
192 			continue;
193 
194 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
195 			continue;
196 
197 		if (c->sport == psm && !bacmp(&c->src, src))
198 			return c;
199 	}
200 	return NULL;
201 }
202 
203 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
204 {
205 	int err;
206 
207 	write_lock(&chan_list_lock);
208 
209 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
210 		err = -EADDRINUSE;
211 		goto done;
212 	}
213 
214 	if (psm) {
215 		chan->psm = psm;
216 		chan->sport = psm;
217 		err = 0;
218 	} else {
219 		u16 p, start, end, incr;
220 
221 		if (chan->src_type == BDADDR_BREDR) {
222 			start = L2CAP_PSM_DYN_START;
223 			end = L2CAP_PSM_AUTO_END;
224 			incr = 2;
225 		} else {
226 			start = L2CAP_PSM_LE_DYN_START;
227 			end = L2CAP_PSM_LE_DYN_END;
228 			incr = 1;
229 		}
230 
231 		err = -EINVAL;
232 		for (p = start; p <= end; p += incr)
233 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
234 							 chan->src_type)) {
235 				chan->psm   = cpu_to_le16(p);
236 				chan->sport = cpu_to_le16(p);
237 				err = 0;
238 				break;
239 			}
240 	}
241 
242 done:
243 	write_unlock(&chan_list_lock);
244 	return err;
245 }
246 EXPORT_SYMBOL_GPL(l2cap_add_psm);
247 
248 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
249 {
250 	write_lock(&chan_list_lock);
251 
252 	/* Override the defaults (which are for conn-oriented) */
253 	chan->omtu = L2CAP_DEFAULT_MTU;
254 	chan->chan_type = L2CAP_CHAN_FIXED;
255 
256 	chan->scid = scid;
257 
258 	write_unlock(&chan_list_lock);
259 
260 	return 0;
261 }
262 
263 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 {
265 	u16 cid, dyn_end;
266 
267 	if (conn->hcon->type == LE_LINK)
268 		dyn_end = L2CAP_CID_LE_DYN_END;
269 	else
270 		dyn_end = L2CAP_CID_DYN_END;
271 
272 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
273 		if (!__l2cap_get_chan_by_scid(conn, cid))
274 			return cid;
275 	}
276 
277 	return 0;
278 }
279 
280 static void l2cap_state_change(struct l2cap_chan *chan, int state)
281 {
282 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
283 	       state_to_string(state));
284 
285 	chan->state = state;
286 	chan->ops->state_change(chan, state, 0);
287 }
288 
289 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 						int state, int err)
291 {
292 	chan->state = state;
293 	chan->ops->state_change(chan, chan->state, err);
294 }
295 
296 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
297 {
298 	chan->ops->state_change(chan, chan->state, err);
299 }
300 
301 static void __set_retrans_timer(struct l2cap_chan *chan)
302 {
303 	if (!delayed_work_pending(&chan->monitor_timer) &&
304 	    chan->retrans_timeout) {
305 		l2cap_set_timer(chan, &chan->retrans_timer,
306 				msecs_to_jiffies(chan->retrans_timeout));
307 	}
308 }
309 
310 static void __set_monitor_timer(struct l2cap_chan *chan)
311 {
312 	__clear_retrans_timer(chan);
313 	if (chan->monitor_timeout) {
314 		l2cap_set_timer(chan, &chan->monitor_timer,
315 				msecs_to_jiffies(chan->monitor_timeout));
316 	}
317 }
318 
319 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
320 					       u16 seq)
321 {
322 	struct sk_buff *skb;
323 
324 	skb_queue_walk(head, skb) {
325 		if (bt_cb(skb)->l2cap.txseq == seq)
326 			return skb;
327 	}
328 
329 	return NULL;
330 }
331 
332 /* ---- L2CAP sequence number lists ---- */
333 
334 /* For ERTM, ordered lists of sequence numbers must be tracked for
335  * SREJ requests that are received and for frames that are to be
336  * retransmitted. These seq_list functions implement a singly-linked
337  * list in an array, where membership in the list can also be checked
338  * in constant time. Items can also be added to the tail of the list
339  * and removed from the head in constant time, without further memory
340  * allocs or frees.
341  */
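/* Illustrative walk-through (assumed values for clarity): with mask = 0x3f,
 * appending seq 10 and then seq 25 leaves
 *   head = 10, tail = 25, list[10] = 25, list[25] = L2CAP_SEQ_LIST_TAIL,
 * so l2cap_seq_list_contains() is a single array lookup, and
 * l2cap_seq_list_pop() returns 10 and advances head to 25 in O(1).
 */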
342 
343 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
344 {
345 	size_t alloc_size, i;
346 
347 	/* Allocated size is a power of 2 to map sequence numbers
348 	 * (which may be up to 14 bits) in to a smaller array that is
349 	 * sized for the negotiated ERTM transmit windows.
350 	 */
351 	alloc_size = roundup_pow_of_two(size);
352 
353 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
354 	if (!seq_list->list)
355 		return -ENOMEM;
356 
357 	seq_list->mask = alloc_size - 1;
358 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
359 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
360 	for (i = 0; i < alloc_size; i++)
361 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
362 
363 	return 0;
364 }
365 
366 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
367 {
368 	kfree(seq_list->list);
369 }
370 
371 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
372 					   u16 seq)
373 {
374 	/* Constant-time check for list membership */
375 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
379 {
380 	u16 seq = seq_list->head;
381 	u16 mask = seq_list->mask;
382 
383 	seq_list->head = seq_list->list[seq & mask];
384 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
385 
386 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
387 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 	}
390 
391 	return seq;
392 }
393 
394 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 {
396 	u16 i;
397 
398 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
399 		return;
400 
401 	for (i = 0; i <= seq_list->mask; i++)
402 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
403 
404 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
405 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
406 }
407 
408 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
409 {
410 	u16 mask = seq_list->mask;
411 
412 	/* All appends happen in constant time */
413 
414 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
415 		return;
416 
417 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
418 		seq_list->head = seq;
419 	else
420 		seq_list->list[seq_list->tail & mask] = seq;
421 
422 	seq_list->tail = seq;
423 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
424 }
425 
426 static void l2cap_chan_timeout(struct work_struct *work)
427 {
428 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
429 					       chan_timer.work);
430 	struct l2cap_conn *conn = chan->conn;
431 	int reason;
432 
433 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
434 
435 	mutex_lock(&conn->chan_lock);
436 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
437 	 * this work. No need to call l2cap_chan_hold(chan) here again.
438 	 */
439 	l2cap_chan_lock(chan);
440 
441 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
442 		reason = ECONNREFUSED;
443 	else if (chan->state == BT_CONNECT &&
444 		 chan->sec_level != BT_SECURITY_SDP)
445 		reason = ECONNREFUSED;
446 	else
447 		reason = ETIMEDOUT;
448 
449 	l2cap_chan_close(chan, reason);
450 
451 	chan->ops->close(chan);
452 
453 	l2cap_chan_unlock(chan);
454 	l2cap_chan_put(chan);
455 
456 	mutex_unlock(&conn->chan_lock);
457 }
458 
459 struct l2cap_chan *l2cap_chan_create(void)
460 {
461 	struct l2cap_chan *chan;
462 
463 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
464 	if (!chan)
465 		return NULL;
466 
467 	skb_queue_head_init(&chan->tx_q);
468 	skb_queue_head_init(&chan->srej_q);
469 	mutex_init(&chan->lock);
470 
471 	/* Set default lock nesting level */
472 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
473 
474 	write_lock(&chan_list_lock);
475 	list_add(&chan->global_l, &chan_list);
476 	write_unlock(&chan_list_lock);
477 
478 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
479 
480 	chan->state = BT_OPEN;
481 
482 	kref_init(&chan->kref);
483 
484 	/* This flag is cleared in l2cap_chan_ready() */
485 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
486 
487 	BT_DBG("chan %p", chan);
488 
489 	return chan;
490 }
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
492 
493 static void l2cap_chan_destroy(struct kref *kref)
494 {
495 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
496 
497 	BT_DBG("chan %p", chan);
498 
499 	write_lock(&chan_list_lock);
500 	list_del(&chan->global_l);
501 	write_unlock(&chan_list_lock);
502 
503 	kfree(chan);
504 }
505 
506 void l2cap_chan_hold(struct l2cap_chan *c)
507 {
508 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
509 
510 	kref_get(&c->kref);
511 }
512 
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
514 {
515 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
516 
517 	if (!kref_get_unless_zero(&c->kref))
518 		return NULL;
519 
520 	return c;
521 }
522 
523 void l2cap_chan_put(struct l2cap_chan *c)
524 {
525 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
526 
527 	kref_put(&c->kref, l2cap_chan_destroy);
528 }
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
530 
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
532 {
533 	chan->fcs  = L2CAP_FCS_CRC16;
534 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 	chan->remote_max_tx = chan->max_tx;
538 	chan->remote_tx_win = chan->tx_win;
539 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 	chan->sec_level = BT_SECURITY_LOW;
541 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
544 
545 	chan->conf_state = 0;
546 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
547 
548 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
549 }
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
551 
552 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
553 {
554 	chan->sdu = NULL;
555 	chan->sdu_last_frag = NULL;
556 	chan->sdu_len = 0;
557 	chan->tx_credits = tx_credits;
558 	/* Derive MPS from connection MTU to stop HCI fragmentation */
559 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
560 	/* Give enough credits for a full packet */
561 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
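	/* Example with assumed values: if imtu = 672 and conn->mtu = 255, then
	 * mps = min(672, 255 - 4) = 251 and rx_credits = 672 / 251 + 1 = 3,
	 * i.e. three credits are enough to receive one full imtu-sized SDU.
	 */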
562 
563 	skb_queue_head_init(&chan->tx_q);
564 }
565 
566 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
567 {
568 	l2cap_le_flowctl_init(chan, tx_credits);
569 
570 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
571 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
572 		chan->mps = L2CAP_ECRED_MIN_MPS;
573 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
574 	}
575 }
576 
577 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
578 {
579 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
580 	       __le16_to_cpu(chan->psm), chan->dcid);
581 
582 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
583 
584 	chan->conn = conn;
585 
586 	switch (chan->chan_type) {
587 	case L2CAP_CHAN_CONN_ORIENTED:
588 		/* Alloc CID for connection-oriented socket */
589 		chan->scid = l2cap_alloc_cid(conn);
590 		if (conn->hcon->type == ACL_LINK)
591 			chan->omtu = L2CAP_DEFAULT_MTU;
592 		break;
593 
594 	case L2CAP_CHAN_CONN_LESS:
595 		/* Connectionless socket */
596 		chan->scid = L2CAP_CID_CONN_LESS;
597 		chan->dcid = L2CAP_CID_CONN_LESS;
598 		chan->omtu = L2CAP_DEFAULT_MTU;
599 		break;
600 
601 	case L2CAP_CHAN_FIXED:
602 		/* Caller will set CID and CID specific MTU values */
603 		break;
604 
605 	default:
606 		/* Raw socket can send/recv signalling messages only */
607 		chan->scid = L2CAP_CID_SIGNALING;
608 		chan->dcid = L2CAP_CID_SIGNALING;
609 		chan->omtu = L2CAP_DEFAULT_MTU;
610 	}
611 
612 	chan->local_id		= L2CAP_BESTEFFORT_ID;
613 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
614 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
615 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
616 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
617 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
618 
619 	l2cap_chan_hold(chan);
620 
621 	/* Only keep a reference for fixed channels if they requested it */
622 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
623 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
624 		hci_conn_hold(conn->hcon);
625 
626 	list_add(&chan->list, &conn->chan_l);
627 }
628 
629 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
630 {
631 	mutex_lock(&conn->chan_lock);
632 	__l2cap_chan_add(conn, chan);
633 	mutex_unlock(&conn->chan_lock);
634 }
635 
636 void l2cap_chan_del(struct l2cap_chan *chan, int err)
637 {
638 	struct l2cap_conn *conn = chan->conn;
639 
640 	__clear_chan_timer(chan);
641 
642 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
643 	       state_to_string(chan->state));
644 
645 	chan->ops->teardown(chan, err);
646 
647 	if (conn) {
648 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
649 		/* Delete from channel list */
650 		list_del(&chan->list);
651 
652 		l2cap_chan_put(chan);
653 
654 		chan->conn = NULL;
655 
656 		/* Reference was only held for non-fixed channels or
657 		 * fixed channels that explicitly requested it using the
658 		 * FLAG_HOLD_HCI_CONN flag.
659 		 */
660 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
661 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
662 			hci_conn_drop(conn->hcon);
663 
664 		if (mgr && mgr->bredr_chan == chan)
665 			mgr->bredr_chan = NULL;
666 	}
667 
668 	if (chan->hs_hchan) {
669 		struct hci_chan *hs_hchan = chan->hs_hchan;
670 
671 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
672 		amp_disconnect_logical_link(hs_hchan);
673 	}
674 
675 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
676 		return;
677 
678 	switch (chan->mode) {
679 	case L2CAP_MODE_BASIC:
680 		break;
681 
682 	case L2CAP_MODE_LE_FLOWCTL:
683 	case L2CAP_MODE_EXT_FLOWCTL:
684 		skb_queue_purge(&chan->tx_q);
685 		break;
686 
687 	case L2CAP_MODE_ERTM:
688 		__clear_retrans_timer(chan);
689 		__clear_monitor_timer(chan);
690 		__clear_ack_timer(chan);
691 
692 		skb_queue_purge(&chan->srej_q);
693 
694 		l2cap_seq_list_free(&chan->srej_list);
695 		l2cap_seq_list_free(&chan->retrans_list);
696 		fallthrough;
697 
698 	case L2CAP_MODE_STREAMING:
699 		skb_queue_purge(&chan->tx_q);
700 		break;
701 	}
702 
703 	return;
704 }
705 EXPORT_SYMBOL_GPL(l2cap_chan_del);
706 
707 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
708 			      void *data)
709 {
710 	struct l2cap_chan *chan;
711 
712 	list_for_each_entry(chan, &conn->chan_l, list) {
713 		func(chan, data);
714 	}
715 }
716 
717 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
718 		     void *data)
719 {
720 	if (!conn)
721 		return;
722 
723 	mutex_lock(&conn->chan_lock);
724 	__l2cap_chan_list(conn, func, data);
725 	mutex_unlock(&conn->chan_lock);
726 }
727 
728 EXPORT_SYMBOL_GPL(l2cap_chan_list);
729 
730 static void l2cap_conn_update_id_addr(struct work_struct *work)
731 {
732 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
733 					       id_addr_update_work);
734 	struct hci_conn *hcon = conn->hcon;
735 	struct l2cap_chan *chan;
736 
737 	mutex_lock(&conn->chan_lock);
738 
739 	list_for_each_entry(chan, &conn->chan_l, list) {
740 		l2cap_chan_lock(chan);
741 		bacpy(&chan->dst, &hcon->dst);
742 		chan->dst_type = bdaddr_dst_type(hcon);
743 		l2cap_chan_unlock(chan);
744 	}
745 
746 	mutex_unlock(&conn->chan_lock);
747 }
748 
749 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
750 {
751 	struct l2cap_conn *conn = chan->conn;
752 	struct l2cap_le_conn_rsp rsp;
753 	u16 result;
754 
755 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
756 		result = L2CAP_CR_LE_AUTHORIZATION;
757 	else
758 		result = L2CAP_CR_LE_BAD_PSM;
759 
760 	l2cap_state_change(chan, BT_DISCONN);
761 
762 	rsp.dcid    = cpu_to_le16(chan->scid);
763 	rsp.mtu     = cpu_to_le16(chan->imtu);
764 	rsp.mps     = cpu_to_le16(chan->mps);
765 	rsp.credits = cpu_to_le16(chan->rx_credits);
766 	rsp.result  = cpu_to_le16(result);
767 
768 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
769 		       &rsp);
770 }
771 
772 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
773 {
774 	struct l2cap_conn *conn = chan->conn;
775 	struct l2cap_ecred_conn_rsp rsp;
776 	u16 result;
777 
778 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
779 		result = L2CAP_CR_LE_AUTHORIZATION;
780 	else
781 		result = L2CAP_CR_LE_BAD_PSM;
782 
783 	l2cap_state_change(chan, BT_DISCONN);
784 
785 	memset(&rsp, 0, sizeof(rsp));
786 
787 	rsp.result  = cpu_to_le16(result);
788 
789 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
790 		       &rsp);
791 }
792 
793 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
794 {
795 	struct l2cap_conn *conn = chan->conn;
796 	struct l2cap_conn_rsp rsp;
797 	u16 result;
798 
799 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
800 		result = L2CAP_CR_SEC_BLOCK;
801 	else
802 		result = L2CAP_CR_BAD_PSM;
803 
804 	l2cap_state_change(chan, BT_DISCONN);
805 
806 	rsp.scid   = cpu_to_le16(chan->dcid);
807 	rsp.dcid   = cpu_to_le16(chan->scid);
808 	rsp.result = cpu_to_le16(result);
809 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 
811 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
812 }
813 
814 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
815 {
816 	struct l2cap_conn *conn = chan->conn;
817 
818 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
819 
820 	switch (chan->state) {
821 	case BT_LISTEN:
822 		chan->ops->teardown(chan, 0);
823 		break;
824 
825 	case BT_CONNECTED:
826 	case BT_CONFIG:
827 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
828 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
829 			l2cap_send_disconn_req(chan, reason);
830 		} else
831 			l2cap_chan_del(chan, reason);
832 		break;
833 
834 	case BT_CONNECT2:
835 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
836 			if (conn->hcon->type == ACL_LINK)
837 				l2cap_chan_connect_reject(chan);
838 			else if (conn->hcon->type == LE_LINK) {
839 				switch (chan->mode) {
840 				case L2CAP_MODE_LE_FLOWCTL:
841 					l2cap_chan_le_connect_reject(chan);
842 					break;
843 				case L2CAP_MODE_EXT_FLOWCTL:
844 					l2cap_chan_ecred_connect_reject(chan);
845 					break;
846 				}
847 			}
848 		}
849 
850 		l2cap_chan_del(chan, reason);
851 		break;
852 
853 	case BT_CONNECT:
854 	case BT_DISCONN:
855 		l2cap_chan_del(chan, reason);
856 		break;
857 
858 	default:
859 		chan->ops->teardown(chan, 0);
860 		break;
861 	}
862 }
863 EXPORT_SYMBOL(l2cap_chan_close);
864 
865 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
866 {
867 	switch (chan->chan_type) {
868 	case L2CAP_CHAN_RAW:
869 		switch (chan->sec_level) {
870 		case BT_SECURITY_HIGH:
871 		case BT_SECURITY_FIPS:
872 			return HCI_AT_DEDICATED_BONDING_MITM;
873 		case BT_SECURITY_MEDIUM:
874 			return HCI_AT_DEDICATED_BONDING;
875 		default:
876 			return HCI_AT_NO_BONDING;
877 		}
878 		break;
879 	case L2CAP_CHAN_CONN_LESS:
880 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
881 			if (chan->sec_level == BT_SECURITY_LOW)
882 				chan->sec_level = BT_SECURITY_SDP;
883 		}
884 		if (chan->sec_level == BT_SECURITY_HIGH ||
885 		    chan->sec_level == BT_SECURITY_FIPS)
886 			return HCI_AT_NO_BONDING_MITM;
887 		else
888 			return HCI_AT_NO_BONDING;
889 		break;
890 	case L2CAP_CHAN_CONN_ORIENTED:
891 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
892 			if (chan->sec_level == BT_SECURITY_LOW)
893 				chan->sec_level = BT_SECURITY_SDP;
894 
895 			if (chan->sec_level == BT_SECURITY_HIGH ||
896 			    chan->sec_level == BT_SECURITY_FIPS)
897 				return HCI_AT_NO_BONDING_MITM;
898 			else
899 				return HCI_AT_NO_BONDING;
900 		}
901 		fallthrough;
902 
903 	default:
904 		switch (chan->sec_level) {
905 		case BT_SECURITY_HIGH:
906 		case BT_SECURITY_FIPS:
907 			return HCI_AT_GENERAL_BONDING_MITM;
908 		case BT_SECURITY_MEDIUM:
909 			return HCI_AT_GENERAL_BONDING;
910 		default:
911 			return HCI_AT_NO_BONDING;
912 		}
913 		break;
914 	}
915 }
916 
917 /* Service level security */
918 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
919 {
920 	struct l2cap_conn *conn = chan->conn;
921 	__u8 auth_type;
922 
923 	if (conn->hcon->type == LE_LINK)
924 		return smp_conn_security(conn->hcon, chan->sec_level);
925 
926 	auth_type = l2cap_get_auth_type(chan);
927 
928 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
929 				 initiator);
930 }
931 
932 static u8 l2cap_get_ident(struct l2cap_conn *conn)
933 {
934 	u8 id;
935 
936 	/* Get next available identifier.
937 	 *    1 - 128 are used by kernel.
938 	 *  129 - 199 are reserved.
939 	 *  200 - 254 are used by utilities like l2ping, etc.
940 	 */
941 
942 	mutex_lock(&conn->ident_lock);
943 
944 	if (++conn->tx_ident > 128)
945 		conn->tx_ident = 1;
946 
947 	id = conn->tx_ident;
948 
949 	mutex_unlock(&conn->ident_lock);
950 
951 	return id;
952 }
953 
954 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
955 			   void *data)
956 {
957 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
958 	u8 flags;
959 
960 	BT_DBG("code 0x%2.2x", code);
961 
962 	if (!skb)
963 		return;
964 
965 	/* Use NO_FLUSH if supported or we have an LE link (which does
966 	 * not support auto-flushing packets) */
967 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
968 	    conn->hcon->type == LE_LINK)
969 		flags = ACL_START_NO_FLUSH;
970 	else
971 		flags = ACL_START;
972 
973 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
974 	skb->priority = HCI_PRIO_MAX;
975 
976 	hci_send_acl(conn->hchan, skb, flags);
977 }
978 
979 static bool __chan_is_moving(struct l2cap_chan *chan)
980 {
981 	return chan->move_state != L2CAP_MOVE_STABLE &&
982 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
983 }
984 
985 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
986 {
987 	struct hci_conn *hcon = chan->conn->hcon;
988 	u16 flags;
989 
990 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
991 	       skb->priority);
992 
993 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
994 		if (chan->hs_hchan)
995 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
996 		else
997 			kfree_skb(skb);
998 
999 		return;
1000 	}
1001 
1002 	/* Use NO_FLUSH for LE links (where this is the only option) or
1003 	 * if the BR/EDR link supports it and flushing has not been
1004 	 * explicitly requested (through FLAG_FLUSHABLE).
1005 	 */
1006 	if (hcon->type == LE_LINK ||
1007 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1008 	     lmp_no_flush_capable(hcon->hdev)))
1009 		flags = ACL_START_NO_FLUSH;
1010 	else
1011 		flags = ACL_START;
1012 
1013 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1014 	hci_send_acl(chan->conn->hchan, skb, flags);
1015 }
1016 
1017 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1018 {
1019 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1020 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1021 
1022 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1023 		/* S-Frame */
1024 		control->sframe = 1;
1025 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1026 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1027 
1028 		control->sar = 0;
1029 		control->txseq = 0;
1030 	} else {
1031 		/* I-Frame */
1032 		control->sframe = 0;
1033 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1034 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1035 
1036 		control->poll = 0;
1037 		control->super = 0;
1038 	}
1039 }
1040 
1041 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1042 {
1043 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1044 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1045 
1046 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1047 		/* S-Frame */
1048 		control->sframe = 1;
1049 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1050 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1051 
1052 		control->sar = 0;
1053 		control->txseq = 0;
1054 	} else {
1055 		/* I-Frame */
1056 		control->sframe = 0;
1057 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1058 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1059 
1060 		control->poll = 0;
1061 		control->super = 0;
1062 	}
1063 }
1064 
1065 static inline void __unpack_control(struct l2cap_chan *chan,
1066 				    struct sk_buff *skb)
1067 {
1068 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1069 		__unpack_extended_control(get_unaligned_le32(skb->data),
1070 					  &bt_cb(skb)->l2cap);
1071 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1072 	} else {
1073 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1076 	}
1077 }
1078 
1079 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1080 {
1081 	u32 packed;
1082 
1083 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1084 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1085 
1086 	if (control->sframe) {
1087 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1088 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1089 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1090 	} else {
1091 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1092 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1093 	}
1094 
1095 	return packed;
1096 }
1097 
1098 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1099 {
1100 	u16 packed;
1101 
1102 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1103 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1104 
1105 	if (control->sframe) {
1106 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1107 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1108 		packed |= L2CAP_CTRL_FRAME_TYPE;
1109 	} else {
1110 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1111 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1112 	}
1113 
1114 	return packed;
1115 }
1116 
1117 static inline void __pack_control(struct l2cap_chan *chan,
1118 				  struct l2cap_ctrl *control,
1119 				  struct sk_buff *skb)
1120 {
1121 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1122 		put_unaligned_le32(__pack_extended_control(control),
1123 				   skb->data + L2CAP_HDR_SIZE);
1124 	} else {
1125 		put_unaligned_le16(__pack_enhanced_control(control),
1126 				   skb->data + L2CAP_HDR_SIZE);
1127 	}
1128 }
1129 
1130 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1131 {
1132 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1133 		return L2CAP_EXT_HDR_SIZE;
1134 	else
1135 		return L2CAP_ENH_HDR_SIZE;
1136 }
1137 
1138 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1139 					       u32 control)
1140 {
1141 	struct sk_buff *skb;
1142 	struct l2cap_hdr *lh;
1143 	int hlen = __ertm_hdr_size(chan);
1144 
1145 	if (chan->fcs == L2CAP_FCS_CRC16)
1146 		hlen += L2CAP_FCS_SIZE;
1147 
1148 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1149 
1150 	if (!skb)
1151 		return ERR_PTR(-ENOMEM);
1152 
1153 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1154 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1155 	lh->cid = cpu_to_le16(chan->dcid);
1156 
1157 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1158 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1159 	else
1160 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1161 
1162 	if (chan->fcs == L2CAP_FCS_CRC16) {
1163 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1164 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1165 	}
1166 
1167 	skb->priority = HCI_PRIO_MAX;
1168 	return skb;
1169 }
1170 
1171 static void l2cap_send_sframe(struct l2cap_chan *chan,
1172 			      struct l2cap_ctrl *control)
1173 {
1174 	struct sk_buff *skb;
1175 	u32 control_field;
1176 
1177 	BT_DBG("chan %p, control %p", chan, control);
1178 
1179 	if (!control->sframe)
1180 		return;
1181 
1182 	if (__chan_is_moving(chan))
1183 		return;
1184 
1185 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1186 	    !control->poll)
1187 		control->final = 1;
1188 
1189 	if (control->super == L2CAP_SUPER_RR)
1190 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1191 	else if (control->super == L2CAP_SUPER_RNR)
1192 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1193 
1194 	if (control->super != L2CAP_SUPER_SREJ) {
1195 		chan->last_acked_seq = control->reqseq;
1196 		__clear_ack_timer(chan);
1197 	}
1198 
1199 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1200 	       control->final, control->poll, control->super);
1201 
1202 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1203 		control_field = __pack_extended_control(control);
1204 	else
1205 		control_field = __pack_enhanced_control(control);
1206 
1207 	skb = l2cap_create_sframe_pdu(chan, control_field);
1208 	if (!IS_ERR(skb))
1209 		l2cap_do_send(chan, skb);
1210 }
1211 
1212 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1213 {
1214 	struct l2cap_ctrl control;
1215 
1216 	BT_DBG("chan %p, poll %d", chan, poll);
1217 
1218 	memset(&control, 0, sizeof(control));
1219 	control.sframe = 1;
1220 	control.poll = poll;
1221 
1222 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1223 		control.super = L2CAP_SUPER_RNR;
1224 	else
1225 		control.super = L2CAP_SUPER_RR;
1226 
1227 	control.reqseq = chan->buffer_seq;
1228 	l2cap_send_sframe(chan, &control);
1229 }
1230 
1231 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1232 {
1233 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1234 		return true;
1235 
1236 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1237 }
1238 
1239 static bool __amp_capable(struct l2cap_chan *chan)
1240 {
1241 	struct l2cap_conn *conn = chan->conn;
1242 	struct hci_dev *hdev;
1243 	bool amp_available = false;
1244 
1245 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1246 		return false;
1247 
1248 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1249 		return false;
1250 
1251 	read_lock(&hci_dev_list_lock);
1252 	list_for_each_entry(hdev, &hci_dev_list, list) {
1253 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1254 		    test_bit(HCI_UP, &hdev->flags)) {
1255 			amp_available = true;
1256 			break;
1257 		}
1258 	}
1259 	read_unlock(&hci_dev_list_lock);
1260 
1261 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1262 		return amp_available;
1263 
1264 	return false;
1265 }
1266 
1267 static bool l2cap_check_efs(struct l2cap_chan *chan)
1268 {
1269 	/* Check EFS parameters */
1270 	return true;
1271 }
1272 
1273 void l2cap_send_conn_req(struct l2cap_chan *chan)
1274 {
1275 	struct l2cap_conn *conn = chan->conn;
1276 	struct l2cap_conn_req req;
1277 
1278 	req.scid = cpu_to_le16(chan->scid);
1279 	req.psm  = chan->psm;
1280 
1281 	chan->ident = l2cap_get_ident(conn);
1282 
1283 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1284 
1285 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1286 }
1287 
1288 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1289 {
1290 	struct l2cap_create_chan_req req;
1291 	req.scid = cpu_to_le16(chan->scid);
1292 	req.psm  = chan->psm;
1293 	req.amp_id = amp_id;
1294 
1295 	chan->ident = l2cap_get_ident(chan->conn);
1296 
1297 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1298 		       sizeof(req), &req);
1299 }
1300 
1301 static void l2cap_move_setup(struct l2cap_chan *chan)
1302 {
1303 	struct sk_buff *skb;
1304 
1305 	BT_DBG("chan %p", chan);
1306 
1307 	if (chan->mode != L2CAP_MODE_ERTM)
1308 		return;
1309 
1310 	__clear_retrans_timer(chan);
1311 	__clear_monitor_timer(chan);
1312 	__clear_ack_timer(chan);
1313 
1314 	chan->retry_count = 0;
1315 	skb_queue_walk(&chan->tx_q, skb) {
1316 		if (bt_cb(skb)->l2cap.retries)
1317 			bt_cb(skb)->l2cap.retries = 1;
1318 		else
1319 			break;
1320 	}
1321 
1322 	chan->expected_tx_seq = chan->buffer_seq;
1323 
1324 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1325 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1326 	l2cap_seq_list_clear(&chan->retrans_list);
1327 	l2cap_seq_list_clear(&chan->srej_list);
1328 	skb_queue_purge(&chan->srej_q);
1329 
1330 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1331 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1332 
1333 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1334 }
1335 
1336 static void l2cap_move_done(struct l2cap_chan *chan)
1337 {
1338 	u8 move_role = chan->move_role;
1339 	BT_DBG("chan %p", chan);
1340 
1341 	chan->move_state = L2CAP_MOVE_STABLE;
1342 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1343 
1344 	if (chan->mode != L2CAP_MODE_ERTM)
1345 		return;
1346 
1347 	switch (move_role) {
1348 	case L2CAP_MOVE_ROLE_INITIATOR:
1349 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1350 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1351 		break;
1352 	case L2CAP_MOVE_ROLE_RESPONDER:
1353 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1354 		break;
1355 	}
1356 }
1357 
1358 static void l2cap_chan_ready(struct l2cap_chan *chan)
1359 {
1360 	/* The channel may have already been flagged as connected in
1361 	 * case of receiving data before the L2CAP info req/rsp
1362 	 * procedure is complete.
1363 	 */
1364 	if (chan->state == BT_CONNECTED)
1365 		return;
1366 
1367 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1368 	chan->conf_state = 0;
1369 	__clear_chan_timer(chan);
1370 
1371 	switch (chan->mode) {
1372 	case L2CAP_MODE_LE_FLOWCTL:
1373 	case L2CAP_MODE_EXT_FLOWCTL:
1374 		if (!chan->tx_credits)
1375 			chan->ops->suspend(chan);
1376 		break;
1377 	}
1378 
1379 	chan->state = BT_CONNECTED;
1380 
1381 	chan->ops->ready(chan);
1382 }
1383 
1384 static void l2cap_le_connect(struct l2cap_chan *chan)
1385 {
1386 	struct l2cap_conn *conn = chan->conn;
1387 	struct l2cap_le_conn_req req;
1388 
1389 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1390 		return;
1391 
1392 	if (!chan->imtu)
1393 		chan->imtu = chan->conn->mtu;
1394 
1395 	l2cap_le_flowctl_init(chan, 0);
1396 
1397 	req.psm     = chan->psm;
1398 	req.scid    = cpu_to_le16(chan->scid);
1399 	req.mtu     = cpu_to_le16(chan->imtu);
1400 	req.mps     = cpu_to_le16(chan->mps);
1401 	req.credits = cpu_to_le16(chan->rx_credits);
1402 
1403 	chan->ident = l2cap_get_ident(conn);
1404 
1405 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1406 		       sizeof(req), &req);
1407 }
1408 
1409 struct l2cap_ecred_conn_data {
1410 	struct {
1411 		struct l2cap_ecred_conn_req req;
1412 		__le16 scid[5];
1413 	} __packed pdu;
1414 	struct l2cap_chan *chan;
1415 	struct pid *pid;
1416 	int count;
1417 };
1418 
1419 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1420 {
1421 	struct l2cap_ecred_conn_data *conn = data;
1422 	struct pid *pid;
1423 
1424 	if (chan == conn->chan)
1425 		return;
1426 
1427 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1428 		return;
1429 
1430 	pid = chan->ops->get_peer_pid(chan);
1431 
1432 	/* Only add deferred channels with the same PID/PSM */
1433 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1434 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1435 		return;
1436 
1437 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1438 		return;
1439 
1440 	l2cap_ecred_init(chan, 0);
1441 
1442 	/* Set the same ident so we can match on the rsp */
1443 	chan->ident = conn->chan->ident;
1444 
1445 	/* Include all channels deferred */
1446 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1447 
1448 	conn->count++;
1449 }
1450 
1451 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1452 {
1453 	struct l2cap_conn *conn = chan->conn;
1454 	struct l2cap_ecred_conn_data data;
1455 
1456 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1457 		return;
1458 
1459 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1460 		return;
1461 
1462 	l2cap_ecred_init(chan, 0);
1463 
1464 	data.pdu.req.psm     = chan->psm;
1465 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1466 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1467 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1468 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1469 
1470 	chan->ident = l2cap_get_ident(conn);
1471 	data.pid = chan->ops->get_peer_pid(chan);
1472 
1473 	data.count = 1;
1474 	data.chan = chan;
1475 	data.pid = chan->ops->get_peer_pid(chan);
1476 
1477 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1478 
1479 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1480 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1481 		       &data.pdu);
1482 }
1483 
1484 static void l2cap_le_start(struct l2cap_chan *chan)
1485 {
1486 	struct l2cap_conn *conn = chan->conn;
1487 
1488 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1489 		return;
1490 
1491 	if (!chan->psm) {
1492 		l2cap_chan_ready(chan);
1493 		return;
1494 	}
1495 
1496 	if (chan->state == BT_CONNECT) {
1497 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1498 			l2cap_ecred_connect(chan);
1499 		else
1500 			l2cap_le_connect(chan);
1501 	}
1502 }
1503 
1504 static void l2cap_start_connection(struct l2cap_chan *chan)
1505 {
1506 	if (__amp_capable(chan)) {
1507 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1508 		a2mp_discover_amp(chan);
1509 	} else if (chan->conn->hcon->type == LE_LINK) {
1510 		l2cap_le_start(chan);
1511 	} else {
1512 		l2cap_send_conn_req(chan);
1513 	}
1514 }
1515 
1516 static void l2cap_request_info(struct l2cap_conn *conn)
1517 {
1518 	struct l2cap_info_req req;
1519 
1520 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1521 		return;
1522 
1523 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1524 
1525 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1526 	conn->info_ident = l2cap_get_ident(conn);
1527 
1528 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1529 
1530 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1531 		       sizeof(req), &req);
1532 }
1533 
1534 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1535 {
1536 	/* The minimum encryption key size needs to be enforced by the
1537 	 * host stack before establishing any L2CAP connections. The
1538 	 * specification in theory allows a minimum of 1, but to align
1539 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1540 	 *
1541 	 * This check might also be called for unencrypted connections
1542 	 * that have no key size requirements. Ensure that the link is
1543 	 * actually encrypted before enforcing a key size.
1544 	 */
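	/* Example (illustrative, assuming the default minimum of 7 noted
	 * above): an unencrypted link passes this check, an encrypted link
	 * with enc_key_size >= 7 passes, and an encrypted link negotiated
	 * down to a 6-octet key is rejected.
	 */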
1545 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1546 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1547 }
1548 
1549 static void l2cap_do_start(struct l2cap_chan *chan)
1550 {
1551 	struct l2cap_conn *conn = chan->conn;
1552 
1553 	if (conn->hcon->type == LE_LINK) {
1554 		l2cap_le_start(chan);
1555 		return;
1556 	}
1557 
1558 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1559 		l2cap_request_info(conn);
1560 		return;
1561 	}
1562 
1563 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1564 		return;
1565 
1566 	if (!l2cap_chan_check_security(chan, true) ||
1567 	    !__l2cap_no_conn_pending(chan))
1568 		return;
1569 
1570 	if (l2cap_check_enc_key_size(conn->hcon))
1571 		l2cap_start_connection(chan);
1572 	else
1573 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1574 }
1575 
1576 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1577 {
1578 	u32 local_feat_mask = l2cap_feat_mask;
1579 	if (!disable_ertm)
1580 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1581 
1582 	switch (mode) {
1583 	case L2CAP_MODE_ERTM:
1584 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1585 	case L2CAP_MODE_STREAMING:
1586 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1587 	default:
1588 		return 0x00;
1589 	}
1590 }
1591 
1592 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1593 {
1594 	struct l2cap_conn *conn = chan->conn;
1595 	struct l2cap_disconn_req req;
1596 
1597 	if (!conn)
1598 		return;
1599 
1600 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1601 		__clear_retrans_timer(chan);
1602 		__clear_monitor_timer(chan);
1603 		__clear_ack_timer(chan);
1604 	}
1605 
1606 	if (chan->scid == L2CAP_CID_A2MP) {
1607 		l2cap_state_change(chan, BT_DISCONN);
1608 		return;
1609 	}
1610 
1611 	req.dcid = cpu_to_le16(chan->dcid);
1612 	req.scid = cpu_to_le16(chan->scid);
1613 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1614 		       sizeof(req), &req);
1615 
1616 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1617 }
1618 
1619 /* ---- L2CAP connections ---- */
1620 static void l2cap_conn_start(struct l2cap_conn *conn)
1621 {
1622 	struct l2cap_chan *chan, *tmp;
1623 
1624 	BT_DBG("conn %p", conn);
1625 
1626 	mutex_lock(&conn->chan_lock);
1627 
1628 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1629 		l2cap_chan_lock(chan);
1630 
1631 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1632 			l2cap_chan_ready(chan);
1633 			l2cap_chan_unlock(chan);
1634 			continue;
1635 		}
1636 
1637 		if (chan->state == BT_CONNECT) {
1638 			if (!l2cap_chan_check_security(chan, true) ||
1639 			    !__l2cap_no_conn_pending(chan)) {
1640 				l2cap_chan_unlock(chan);
1641 				continue;
1642 			}
1643 
1644 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1645 			    && test_bit(CONF_STATE2_DEVICE,
1646 					&chan->conf_state)) {
1647 				l2cap_chan_close(chan, ECONNRESET);
1648 				l2cap_chan_unlock(chan);
1649 				continue;
1650 			}
1651 
1652 			if (l2cap_check_enc_key_size(conn->hcon))
1653 				l2cap_start_connection(chan);
1654 			else
1655 				l2cap_chan_close(chan, ECONNREFUSED);
1656 
1657 		} else if (chan->state == BT_CONNECT2) {
1658 			struct l2cap_conn_rsp rsp;
1659 			char buf[128];
1660 			rsp.scid = cpu_to_le16(chan->dcid);
1661 			rsp.dcid = cpu_to_le16(chan->scid);
1662 
1663 			if (l2cap_chan_check_security(chan, false)) {
1664 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1665 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1666 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1667 					chan->ops->defer(chan);
1668 
1669 				} else {
1670 					l2cap_state_change(chan, BT_CONFIG);
1671 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1672 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1673 				}
1674 			} else {
1675 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1676 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1677 			}
1678 
1679 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1680 				       sizeof(rsp), &rsp);
1681 
1682 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1683 			    rsp.result != L2CAP_CR_SUCCESS) {
1684 				l2cap_chan_unlock(chan);
1685 				continue;
1686 			}
1687 
1688 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1689 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1690 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1691 			chan->num_conf_req++;
1692 		}
1693 
1694 		l2cap_chan_unlock(chan);
1695 	}
1696 
1697 	mutex_unlock(&conn->chan_lock);
1698 }
1699 
1700 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1701 {
1702 	struct hci_conn *hcon = conn->hcon;
1703 	struct hci_dev *hdev = hcon->hdev;
1704 
1705 	BT_DBG("%s conn %p", hdev->name, conn);
1706 
1707 	/* For outgoing pairing which doesn't necessarily have an
1708 	 * associated socket (e.g. mgmt_pair_device).
1709 	 */
1710 	if (hcon->out)
1711 		smp_conn_security(hcon, hcon->pending_sec_level);
1712 
1713 	/* For LE slave connections, make sure the connection interval
1714 	 * is in the range of the minimum and maximum interval that has
1715 	 * been configured for this connection. If not, then trigger
1716 	 * the connection update procedure.
1717 	 */
1718 	if (hcon->role == HCI_ROLE_SLAVE &&
1719 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1720 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1721 		struct l2cap_conn_param_update_req req;
1722 
1723 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1724 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1725 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1726 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1727 
1728 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1729 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1730 	}
1731 }
1732 
1733 static void l2cap_conn_ready(struct l2cap_conn *conn)
1734 {
1735 	struct l2cap_chan *chan;
1736 	struct hci_conn *hcon = conn->hcon;
1737 
1738 	BT_DBG("conn %p", conn);
1739 
1740 	if (hcon->type == ACL_LINK)
1741 		l2cap_request_info(conn);
1742 
1743 	mutex_lock(&conn->chan_lock);
1744 
1745 	list_for_each_entry(chan, &conn->chan_l, list) {
1746 
1747 		l2cap_chan_lock(chan);
1748 
1749 		if (chan->scid == L2CAP_CID_A2MP) {
1750 			l2cap_chan_unlock(chan);
1751 			continue;
1752 		}
1753 
1754 		if (hcon->type == LE_LINK) {
1755 			l2cap_le_start(chan);
1756 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1757 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1758 				l2cap_chan_ready(chan);
1759 		} else if (chan->state == BT_CONNECT) {
1760 			l2cap_do_start(chan);
1761 		}
1762 
1763 		l2cap_chan_unlock(chan);
1764 	}
1765 
1766 	mutex_unlock(&conn->chan_lock);
1767 
1768 	if (hcon->type == LE_LINK)
1769 		l2cap_le_conn_ready(conn);
1770 
1771 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1772 }
1773 
1774 /* Notify sockets that we cannot guarantee reliability anymore */
1775 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1776 {
1777 	struct l2cap_chan *chan;
1778 
1779 	BT_DBG("conn %p", conn);
1780 
1781 	mutex_lock(&conn->chan_lock);
1782 
1783 	list_for_each_entry(chan, &conn->chan_l, list) {
1784 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1785 			l2cap_chan_set_err(chan, err);
1786 	}
1787 
1788 	mutex_unlock(&conn->chan_lock);
1789 }
1790 
1791 static void l2cap_info_timeout(struct work_struct *work)
1792 {
1793 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1794 					       info_timer.work);
1795 
1796 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1797 	conn->info_ident = 0;
1798 
1799 	l2cap_conn_start(conn);
1800 }
1801 
1802 /*
1803  * l2cap_user
1804  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1805  * callback is called during registration. The ->remove callback is called
1806  * during unregistration.
1807  * An l2cap_user object can either be explicitly unregistered or when the
1808  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1809  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1810  * External modules must own a reference to the l2cap_conn object if they intend
1811  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1812  * any time if they don't.
1813  */
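/* A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	INIT_LIST_HEAD(&my_user.list);               with ->probe and ->remove set
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);   ->probe() runs here
 *	...
 *	l2cap_unregister_user(conn, &my_user);       ->remove() runs here
 *	l2cap_conn_put(conn);
 *
 * The reference taken with l2cap_conn_get() is what makes the unregister
 * call safe, per the rule above.
 */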
1814 
1815 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1816 {
1817 	struct hci_dev *hdev = conn->hcon->hdev;
1818 	int ret;
1819 
1820 	/* We need to check whether l2cap_conn is registered. If it is not, we
1821 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1822 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1823 	 * relies on the parent hci_conn object to be locked. This itself relies
1824 	 * on the hci_dev object to be locked. So we must lock the hci device
1825 	 * here, too. */
1826 
1827 	hci_dev_lock(hdev);
1828 
1829 	if (!list_empty(&user->list)) {
1830 		ret = -EINVAL;
1831 		goto out_unlock;
1832 	}
1833 
1834 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1835 	if (!conn->hchan) {
1836 		ret = -ENODEV;
1837 		goto out_unlock;
1838 	}
1839 
1840 	ret = user->probe(conn, user);
1841 	if (ret)
1842 		goto out_unlock;
1843 
1844 	list_add(&user->list, &conn->users);
1845 	ret = 0;
1846 
1847 out_unlock:
1848 	hci_dev_unlock(hdev);
1849 	return ret;
1850 }
1851 EXPORT_SYMBOL(l2cap_register_user);
1852 
1853 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1854 {
1855 	struct hci_dev *hdev = conn->hcon->hdev;
1856 
1857 	hci_dev_lock(hdev);
1858 
1859 	if (list_empty(&user->list))
1860 		goto out_unlock;
1861 
1862 	list_del_init(&user->list);
1863 	user->remove(conn, user);
1864 
1865 out_unlock:
1866 	hci_dev_unlock(hdev);
1867 }
1868 EXPORT_SYMBOL(l2cap_unregister_user);
1869 
1870 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1871 {
1872 	struct l2cap_user *user;
1873 
1874 	while (!list_empty(&conn->users)) {
1875 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1876 		list_del_init(&user->list);
1877 		user->remove(conn, user);
1878 	}
1879 }
1880 
1881 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1882 {
1883 	struct l2cap_conn *conn = hcon->l2cap_data;
1884 	struct l2cap_chan *chan, *l;
1885 
1886 	if (!conn)
1887 		return;
1888 
1889 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1890 
1891 	kfree_skb(conn->rx_skb);
1892 
1893 	skb_queue_purge(&conn->pending_rx);
1894 
1895 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1896 	 * might block if we are running on a worker from the same workqueue
1897 	 * pending_rx_work is waiting on.
1898 	 */
1899 	if (work_pending(&conn->pending_rx_work))
1900 		cancel_work_sync(&conn->pending_rx_work);
1901 
1902 	if (work_pending(&conn->id_addr_update_work))
1903 		cancel_work_sync(&conn->id_addr_update_work);
1904 
1905 	l2cap_unregister_all_users(conn);
1906 
1907 	/* Force the connection to be immediately dropped */
1908 	hcon->disc_timeout = 0;
1909 
1910 	mutex_lock(&conn->chan_lock);
1911 
1912 	/* Kill channels */
1913 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1914 		l2cap_chan_hold(chan);
1915 		l2cap_chan_lock(chan);
1916 
1917 		l2cap_chan_del(chan, err);
1918 
1919 		chan->ops->close(chan);
1920 
1921 		l2cap_chan_unlock(chan);
1922 		l2cap_chan_put(chan);
1923 	}
1924 
1925 	mutex_unlock(&conn->chan_lock);
1926 
1927 	hci_chan_del(conn->hchan);
1928 
1929 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1930 		cancel_delayed_work_sync(&conn->info_timer);
1931 
1932 	hcon->l2cap_data = NULL;
1933 	conn->hchan = NULL;
1934 	l2cap_conn_put(conn);
1935 }
1936 
1937 static void l2cap_conn_free(struct kref *ref)
1938 {
1939 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1940 
1941 	hci_conn_put(conn->hcon);
1942 	kfree(conn);
1943 }
1944 
1945 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1946 {
1947 	kref_get(&conn->ref);
1948 	return conn;
1949 }
1950 EXPORT_SYMBOL(l2cap_conn_get);
1951 
1952 void l2cap_conn_put(struct l2cap_conn *conn)
1953 {
1954 	kref_put(&conn->ref, l2cap_conn_free);
1955 }
1956 EXPORT_SYMBOL(l2cap_conn_put);
1957 
1958 /* ---- Socket interface ---- */
1959 
1960 /* Find a listening socket (channel) with matching PSM and source/destination
1961  * bdaddr. An exact address match is preferred; otherwise the closest
1962  * wildcard (BDADDR_ANY) binding is returned. */
1963 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1964 						   bdaddr_t *src,
1965 						   bdaddr_t *dst,
1966 						   u8 link_type)
1967 {
1968 	struct l2cap_chan *c, *c1 = NULL;
1969 
1970 	read_lock(&chan_list_lock);
1971 
1972 	list_for_each_entry(c, &chan_list, global_l) {
1973 		if (state && c->state != state)
1974 			continue;
1975 
1976 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1977 			continue;
1978 
1979 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1980 			continue;
1981 
1982 		if (c->psm == psm) {
1983 			int src_match, dst_match;
1984 			int src_any, dst_any;
1985 
1986 			/* Exact match. */
1987 			src_match = !bacmp(&c->src, src);
1988 			dst_match = !bacmp(&c->dst, dst);
1989 			if (src_match && dst_match) {
1990 				c = l2cap_chan_hold_unless_zero(c);
1991 				if (!c)
1992 					continue;
1993 
1994 				read_unlock(&chan_list_lock);
1995 				return c;
1996 			}
1997 
1998 			/* Closest match */
1999 			src_any = !bacmp(&c->src, BDADDR_ANY);
2000 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2001 			if ((src_match && dst_any) || (src_any && dst_match) ||
2002 			    (src_any && dst_any))
2003 				c1 = c;
2004 		}
2005 	}
2006 
2007 	if (c1)
2008 		c1 = l2cap_chan_hold_unless_zero(c1);
2009 
2010 	read_unlock(&chan_list_lock);
2011 
2012 	return c1;
2013 }
2014 
2015 static void l2cap_monitor_timeout(struct work_struct *work)
2016 {
2017 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2018 					       monitor_timer.work);
2019 
2020 	BT_DBG("chan %p", chan);
2021 
2022 	l2cap_chan_lock(chan);
2023 
2024 	if (!chan->conn) {
2025 		l2cap_chan_unlock(chan);
2026 		l2cap_chan_put(chan);
2027 		return;
2028 	}
2029 
2030 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2031 
2032 	l2cap_chan_unlock(chan);
2033 	l2cap_chan_put(chan);
2034 }
2035 
2036 static void l2cap_retrans_timeout(struct work_struct *work)
2037 {
2038 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2039 					       retrans_timer.work);
2040 
2041 	BT_DBG("chan %p", chan);
2042 
2043 	l2cap_chan_lock(chan);
2044 
2045 	if (!chan->conn) {
2046 		l2cap_chan_unlock(chan);
2047 		l2cap_chan_put(chan);
2048 		return;
2049 	}
2050 
2051 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2052 	l2cap_chan_unlock(chan);
2053 	l2cap_chan_put(chan);
2054 }
2055 
2056 static void l2cap_streaming_send(struct l2cap_chan *chan,
2057 				 struct sk_buff_head *skbs)
2058 {
2059 	struct sk_buff *skb;
2060 	struct l2cap_ctrl *control;
2061 
2062 	BT_DBG("chan %p, skbs %p", chan, skbs);
2063 
2064 	if (__chan_is_moving(chan))
2065 		return;
2066 
2067 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2068 
2069 	while (!skb_queue_empty(&chan->tx_q)) {
2070 
2071 		skb = skb_dequeue(&chan->tx_q);
2072 
2073 		bt_cb(skb)->l2cap.retries = 1;
2074 		control = &bt_cb(skb)->l2cap;
2075 
2076 		control->reqseq = 0;
2077 		control->txseq = chan->next_tx_seq;
2078 
2079 		__pack_control(chan, control, skb);
2080 
2081 		if (chan->fcs == L2CAP_FCS_CRC16) {
2082 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2083 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2084 		}
2085 
2086 		l2cap_do_send(chan, skb);
2087 
2088 		BT_DBG("Sent txseq %u", control->txseq);
2089 
2090 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2091 		chan->frames_sent++;
2092 	}
2093 }
2094 
2095 static int l2cap_ertm_send(struct l2cap_chan *chan)
2096 {
2097 	struct sk_buff *skb, *tx_skb;
2098 	struct l2cap_ctrl *control;
2099 	int sent = 0;
2100 
2101 	BT_DBG("chan %p", chan);
2102 
2103 	if (chan->state != BT_CONNECTED)
2104 		return -ENOTCONN;
2105 
2106 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2107 		return 0;
2108 
2109 	if (__chan_is_moving(chan))
2110 		return 0;
2111 
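	/* Transmit while data is queued, the peer's receive window
	 * (remote_tx_win) still has room for unacked frames, and the
	 * transmit state machine permits new I-frames.
	 */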
2112 	while (chan->tx_send_head &&
2113 	       chan->unacked_frames < chan->remote_tx_win &&
2114 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2115 
2116 		skb = chan->tx_send_head;
2117 
2118 		bt_cb(skb)->l2cap.retries = 1;
2119 		control = &bt_cb(skb)->l2cap;
2120 
2121 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2122 			control->final = 1;
2123 
2124 		control->reqseq = chan->buffer_seq;
2125 		chan->last_acked_seq = chan->buffer_seq;
2126 		control->txseq = chan->next_tx_seq;
2127 
2128 		__pack_control(chan, control, skb);
2129 
2130 		if (chan->fcs == L2CAP_FCS_CRC16) {
2131 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2132 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2133 		}
2134 
2135 		/* Clone after data has been modified. Data is assumed to be
2136 		 * read-only (for locking purposes) on cloned sk_buffs.
2137 		 */
2138 		tx_skb = skb_clone(skb, GFP_KERNEL);
2139 
2140 		if (!tx_skb)
2141 			break;
2142 
2143 		__set_retrans_timer(chan);
2144 
2145 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2146 		chan->unacked_frames++;
2147 		chan->frames_sent++;
2148 		sent++;
2149 
2150 		if (skb_queue_is_last(&chan->tx_q, skb))
2151 			chan->tx_send_head = NULL;
2152 		else
2153 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2154 
2155 		l2cap_do_send(chan, tx_skb);
2156 		BT_DBG("Sent txseq %u", control->txseq);
2157 	}
2158 
2159 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2160 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2161 
2162 	return sent;
2163 }
2164 
2165 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2166 {
2167 	struct l2cap_ctrl control;
2168 	struct sk_buff *skb;
2169 	struct sk_buff *tx_skb;
2170 	u16 seq;
2171 
2172 	BT_DBG("chan %p", chan);
2173 
2174 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2175 		return;
2176 
2177 	if (__chan_is_moving(chan))
2178 		return;
2179 
2180 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2181 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2182 
2183 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2184 		if (!skb) {
2185 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2186 			       seq);
2187 			continue;
2188 		}
2189 
2190 		bt_cb(skb)->l2cap.retries++;
2191 		control = bt_cb(skb)->l2cap;
2192 
2193 		if (chan->max_tx != 0 &&
2194 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2195 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2196 			l2cap_send_disconn_req(chan, ECONNRESET);
2197 			l2cap_seq_list_clear(&chan->retrans_list);
2198 			break;
2199 		}
2200 
2201 		control.reqseq = chan->buffer_seq;
2202 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2203 			control.final = 1;
2204 		else
2205 			control.final = 0;
2206 
2207 		if (skb_cloned(skb)) {
2208 			/* Cloned sk_buffs are read-only, so we need a
2209 			 * writeable copy
2210 			 */
2211 			tx_skb = skb_copy(skb, GFP_KERNEL);
2212 		} else {
2213 			tx_skb = skb_clone(skb, GFP_KERNEL);
2214 		}
2215 
2216 		if (!tx_skb) {
2217 			l2cap_seq_list_clear(&chan->retrans_list);
2218 			break;
2219 		}
2220 
2221 		/* Update skb contents */
2222 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2223 			put_unaligned_le32(__pack_extended_control(&control),
2224 					   tx_skb->data + L2CAP_HDR_SIZE);
2225 		} else {
2226 			put_unaligned_le16(__pack_enhanced_control(&control),
2227 					   tx_skb->data + L2CAP_HDR_SIZE);
2228 		}
2229 
2230 		/* Update FCS */
2231 		if (chan->fcs == L2CAP_FCS_CRC16) {
2232 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2233 					tx_skb->len - L2CAP_FCS_SIZE);
2234 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2235 						L2CAP_FCS_SIZE);
2236 		}
2237 
2238 		l2cap_do_send(chan, tx_skb);
2239 
2240 		BT_DBG("Resent txseq %d", control.txseq);
2241 
2242 		chan->last_acked_seq = chan->buffer_seq;
2243 	}
2244 }
2245 
2246 static void l2cap_retransmit(struct l2cap_chan *chan,
2247 			     struct l2cap_ctrl *control)
2248 {
2249 	BT_DBG("chan %p, control %p", chan, control);
2250 
2251 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2252 	l2cap_ertm_resend(chan);
2253 }
2254 
2255 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2256 				 struct l2cap_ctrl *control)
2257 {
2258 	struct sk_buff *skb;
2259 
2260 	BT_DBG("chan %p, control %p", chan, control);
2261 
2262 	if (control->poll)
2263 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2264 
2265 	l2cap_seq_list_clear(&chan->retrans_list);
2266 
2267 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2268 		return;
2269 
2270 	if (chan->unacked_frames) {
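		/* Walk to the first unacknowledged frame (txseq == reqseq),
		 * then mark every already-transmitted frame from there up
		 * to, but not including, tx_send_head for retransmission.
		 */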
2271 		skb_queue_walk(&chan->tx_q, skb) {
2272 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2273 			    skb == chan->tx_send_head)
2274 				break;
2275 		}
2276 
2277 		skb_queue_walk_from(&chan->tx_q, skb) {
2278 			if (skb == chan->tx_send_head)
2279 				break;
2280 
2281 			l2cap_seq_list_append(&chan->retrans_list,
2282 					      bt_cb(skb)->l2cap.txseq);
2283 		}
2284 
2285 		l2cap_ertm_resend(chan);
2286 	}
2287 }
2288 
2289 static void l2cap_send_ack(struct l2cap_chan *chan)
2290 {
2291 	struct l2cap_ctrl control;
2292 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2293 					 chan->last_acked_seq);
2294 	int threshold;
2295 
2296 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2297 	       chan, chan->last_acked_seq, chan->buffer_seq);
2298 
2299 	memset(&control, 0, sizeof(control));
2300 	control.sframe = 1;
2301 
2302 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2303 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2304 		__clear_ack_timer(chan);
2305 		control.super = L2CAP_SUPER_RNR;
2306 		control.reqseq = chan->buffer_seq;
2307 		l2cap_send_sframe(chan, &control);
2308 	} else {
2309 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2310 			l2cap_ertm_send(chan);
2311 			/* If any i-frames were sent, they included an ack */
2312 			if (chan->buffer_seq == chan->last_acked_seq)
2313 				frames_to_ack = 0;
2314 		}
2315 
2316 		/* Ack now if the window is 3/4ths full.
2317 		 * Calculate without mul or div
2318 		 */
2319 		threshold = chan->ack_win;
2320 		threshold += threshold << 1;
2321 		threshold >>= 2;
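		/* e.g. with ack_win = 8: threshold = (8 + 16) >> 2 = 6, so an
		 * RR goes out once 6 received frames are pending acknowledgment.
		 */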
2322 
2323 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2324 		       threshold);
2325 
2326 		if (frames_to_ack >= threshold) {
2327 			__clear_ack_timer(chan);
2328 			control.super = L2CAP_SUPER_RR;
2329 			control.reqseq = chan->buffer_seq;
2330 			l2cap_send_sframe(chan, &control);
2331 			frames_to_ack = 0;
2332 		}
2333 
2334 		if (frames_to_ack)
2335 			__set_ack_timer(chan);
2336 	}
2337 }
2338 
2339 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2340 					 struct msghdr *msg, int len,
2341 					 int count, struct sk_buff *skb)
2342 {
2343 	struct l2cap_conn *conn = chan->conn;
2344 	struct sk_buff **frag;
2345 	int sent = 0;
2346 
2347 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2348 		return -EFAULT;
2349 
2350 	sent += count;
2351 	len  -= count;
2352 
2353 	/* Continuation fragments (no L2CAP header) */
2354 	frag = &skb_shinfo(skb)->frag_list;
2355 	while (len) {
2356 		struct sk_buff *tmp;
2357 
2358 		count = min_t(unsigned int, conn->mtu, len);
2359 
2360 		tmp = chan->ops->alloc_skb(chan, 0, count,
2361 					   msg->msg_flags & MSG_DONTWAIT);
2362 		if (IS_ERR(tmp))
2363 			return PTR_ERR(tmp);
2364 
2365 		*frag = tmp;
2366 
2367 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2368 				   &msg->msg_iter))
2369 			return -EFAULT;
2370 
2371 		sent += count;
2372 		len  -= count;
2373 
2374 		skb->len += (*frag)->len;
2375 		skb->data_len += (*frag)->len;
2376 
2377 		frag = &(*frag)->next;
2378 	}
2379 
2380 	return sent;
2381 }
2382 
2383 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2384 						 struct msghdr *msg, size_t len)
2385 {
2386 	struct l2cap_conn *conn = chan->conn;
2387 	struct sk_buff *skb;
2388 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2389 	struct l2cap_hdr *lh;
2390 
2391 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2392 	       __le16_to_cpu(chan->psm), len);
2393 
2394 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2395 
2396 	skb = chan->ops->alloc_skb(chan, hlen, count,
2397 				   msg->msg_flags & MSG_DONTWAIT);
2398 	if (IS_ERR(skb))
2399 		return skb;
2400 
2401 	/* Create L2CAP header */
2402 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2403 	lh->cid = cpu_to_le16(chan->dcid);
2404 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2405 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2406 
2407 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2408 	if (unlikely(err < 0)) {
2409 		kfree_skb(skb);
2410 		return ERR_PTR(err);
2411 	}
2412 	return skb;
2413 }
2414 
2415 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2416 					      struct msghdr *msg, size_t len)
2417 {
2418 	struct l2cap_conn *conn = chan->conn;
2419 	struct sk_buff *skb;
2420 	int err, count;
2421 	struct l2cap_hdr *lh;
2422 
2423 	BT_DBG("chan %p len %zu", chan, len);
2424 
2425 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2426 
2427 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2428 				   msg->msg_flags & MSG_DONTWAIT);
2429 	if (IS_ERR(skb))
2430 		return skb;
2431 
2432 	/* Create L2CAP header */
2433 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2434 	lh->cid = cpu_to_le16(chan->dcid);
2435 	lh->len = cpu_to_le16(len);
2436 
2437 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2438 	if (unlikely(err < 0)) {
2439 		kfree_skb(skb);
2440 		return ERR_PTR(err);
2441 	}
2442 	return skb;
2443 }
2444 
2445 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2446 					       struct msghdr *msg, size_t len,
2447 					       u16 sdulen)
2448 {
2449 	struct l2cap_conn *conn = chan->conn;
2450 	struct sk_buff *skb;
2451 	int err, count, hlen;
2452 	struct l2cap_hdr *lh;
2453 
2454 	BT_DBG("chan %p len %zu", chan, len);
2455 
2456 	if (!conn)
2457 		return ERR_PTR(-ENOTCONN);
2458 
2459 	hlen = __ertm_hdr_size(chan);
2460 
2461 	if (sdulen)
2462 		hlen += L2CAP_SDULEN_SIZE;
2463 
2464 	if (chan->fcs == L2CAP_FCS_CRC16)
2465 		hlen += L2CAP_FCS_SIZE;
2466 
2467 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2468 
2469 	skb = chan->ops->alloc_skb(chan, hlen, count,
2470 				   msg->msg_flags & MSG_DONTWAIT);
2471 	if (IS_ERR(skb))
2472 		return skb;
2473 
2474 	/* Create L2CAP header */
2475 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2476 	lh->cid = cpu_to_le16(chan->dcid);
2477 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2478 
2479 	/* Control header is populated later */
2480 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2481 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2482 	else
2483 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2484 
2485 	if (sdulen)
2486 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2487 
2488 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2489 	if (unlikely(err < 0)) {
2490 		kfree_skb(skb);
2491 		return ERR_PTR(err);
2492 	}
2493 
2494 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2495 	bt_cb(skb)->l2cap.retries = 0;
2496 	return skb;
2497 }
2498 
2499 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2500 			     struct sk_buff_head *seg_queue,
2501 			     struct msghdr *msg, size_t len)
2502 {
2503 	struct sk_buff *skb;
2504 	u16 sdu_len;
2505 	size_t pdu_len;
2506 	u8 sar;
2507 
2508 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2509 
2510 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2511 	 * so fragmented skbs are not used.  The HCI layer's handling
2512 	 * of fragmented skbs is not compatible with ERTM's queueing.
2513 	 */
2514 
2515 	/* PDU size is derived from the HCI MTU */
2516 	pdu_len = chan->conn->mtu;
2517 
2518 	/* Constrain PDU size for BR/EDR connections */
2519 	if (!chan->hs_hcon)
2520 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2521 
2522 	/* Adjust for largest possible L2CAP overhead. */
2523 	if (chan->fcs)
2524 		pdu_len -= L2CAP_FCS_SIZE;
2525 
2526 	pdu_len -= __ertm_hdr_size(chan);
2527 
2528 	/* Remote device may have requested smaller PDUs */
2529 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
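	/* Illustrative sizing (assumed values): with conn->mtu 1021 the
	 * BR/EDR clamp gives 1019, CRC16 FCS costs 2 bytes and the enhanced
	 * ERTM header 6, leaving 1011, possibly reduced further by the
	 * remote MPS.
	 */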
2530 
2531 	if (len <= pdu_len) {
2532 		sar = L2CAP_SAR_UNSEGMENTED;
2533 		sdu_len = 0;
2534 		pdu_len = len;
2535 	} else {
2536 		sar = L2CAP_SAR_START;
2537 		sdu_len = len;
2538 	}
2539 
2540 	while (len > 0) {
2541 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2542 
2543 		if (IS_ERR(skb)) {
2544 			__skb_queue_purge(seg_queue);
2545 			return PTR_ERR(skb);
2546 		}
2547 
2548 		bt_cb(skb)->l2cap.sar = sar;
2549 		__skb_queue_tail(seg_queue, skb);
2550 
2551 		len -= pdu_len;
2552 		if (sdu_len)
2553 			sdu_len = 0;
2554 
2555 		if (len <= pdu_len) {
2556 			sar = L2CAP_SAR_END;
2557 			pdu_len = len;
2558 		} else {
2559 			sar = L2CAP_SAR_CONTINUE;
2560 		}
2561 	}
2562 
2563 	return 0;
2564 }
2565 
2566 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2567 						   struct msghdr *msg,
2568 						   size_t len, u16 sdulen)
2569 {
2570 	struct l2cap_conn *conn = chan->conn;
2571 	struct sk_buff *skb;
2572 	int err, count, hlen;
2573 	struct l2cap_hdr *lh;
2574 
2575 	BT_DBG("chan %p len %zu", chan, len);
2576 
2577 	if (!conn)
2578 		return ERR_PTR(-ENOTCONN);
2579 
2580 	hlen = L2CAP_HDR_SIZE;
2581 
2582 	if (sdulen)
2583 		hlen += L2CAP_SDULEN_SIZE;
2584 
2585 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2586 
2587 	skb = chan->ops->alloc_skb(chan, hlen, count,
2588 				   msg->msg_flags & MSG_DONTWAIT);
2589 	if (IS_ERR(skb))
2590 		return skb;
2591 
2592 	/* Create L2CAP header */
2593 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2594 	lh->cid = cpu_to_le16(chan->dcid);
2595 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2596 
2597 	if (sdulen)
2598 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2599 
2600 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2601 	if (unlikely(err < 0)) {
2602 		kfree_skb(skb);
2603 		return ERR_PTR(err);
2604 	}
2605 
2606 	return skb;
2607 }
2608 
2609 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2610 				struct sk_buff_head *seg_queue,
2611 				struct msghdr *msg, size_t len)
2612 {
2613 	struct sk_buff *skb;
2614 	size_t pdu_len;
2615 	u16 sdu_len;
2616 
2617 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2618 
2619 	sdu_len = len;
2620 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2621 
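	/* Only the first PDU carries the 2-byte SDU length field, so it gets
	 * remote_mps - L2CAP_SDULEN_SIZE bytes of data; once it has been
	 * queued the per-PDU budget grows back by that amount below.
	 */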
2622 	while (len > 0) {
2623 		if (len <= pdu_len)
2624 			pdu_len = len;
2625 
2626 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2627 		if (IS_ERR(skb)) {
2628 			__skb_queue_purge(seg_queue);
2629 			return PTR_ERR(skb);
2630 		}
2631 
2632 		__skb_queue_tail(seg_queue, skb);
2633 
2634 		len -= pdu_len;
2635 
2636 		if (sdu_len) {
2637 			sdu_len = 0;
2638 			pdu_len += L2CAP_SDULEN_SIZE;
2639 		}
2640 	}
2641 
2642 	return 0;
2643 }
2644 
2645 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2646 {
2647 	int sent = 0;
2648 
2649 	BT_DBG("chan %p", chan);
2650 
2651 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2652 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2653 		chan->tx_credits--;
2654 		sent++;
2655 	}
2656 
2657 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2658 	       skb_queue_len(&chan->tx_q));
2659 }
2660 
2661 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2662 {
2663 	struct sk_buff *skb;
2664 	int err;
2665 	struct sk_buff_head seg_queue;
2666 
2667 	if (!chan->conn)
2668 		return -ENOTCONN;
2669 
2670 	/* Connectionless channel */
2671 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2672 		skb = l2cap_create_connless_pdu(chan, msg, len);
2673 		if (IS_ERR(skb))
2674 			return PTR_ERR(skb);
2675 
2676 		/* Channel lock is released before requesting new skb and then
2677 		 * reacquired thus we need to recheck channel state.
2678 		 */
2679 		if (chan->state != BT_CONNECTED) {
2680 			kfree_skb(skb);
2681 			return -ENOTCONN;
2682 		}
2683 
2684 		l2cap_do_send(chan, skb);
2685 		return len;
2686 	}
2687 
2688 	switch (chan->mode) {
2689 	case L2CAP_MODE_LE_FLOWCTL:
2690 	case L2CAP_MODE_EXT_FLOWCTL:
2691 		/* Check outgoing MTU */
2692 		if (len > chan->omtu)
2693 			return -EMSGSIZE;
2694 
2695 		__skb_queue_head_init(&seg_queue);
2696 
2697 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2698 
2699 		if (chan->state != BT_CONNECTED) {
2700 			__skb_queue_purge(&seg_queue);
2701 			err = -ENOTCONN;
2702 		}
2703 
2704 		if (err)
2705 			return err;
2706 
2707 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2708 
2709 		l2cap_le_flowctl_send(chan);
2710 
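		/* No credits left: ask the channel owner (e.g. the socket
		 * layer) to stop queueing data until the peer grants more
		 * credits.
		 */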
2711 		if (!chan->tx_credits)
2712 			chan->ops->suspend(chan);
2713 
2714 		err = len;
2715 
2716 		break;
2717 
2718 	case L2CAP_MODE_BASIC:
2719 		/* Check outgoing MTU */
2720 		if (len > chan->omtu)
2721 			return -EMSGSIZE;
2722 
2723 		/* Create a basic PDU */
2724 		skb = l2cap_create_basic_pdu(chan, msg, len);
2725 		if (IS_ERR(skb))
2726 			return PTR_ERR(skb);
2727 
2728 		/* Channel lock is released before requesting new skb and then
2729 		 * reacquired thus we need to recheck channel state.
2730 		 */
2731 		if (chan->state != BT_CONNECTED) {
2732 			kfree_skb(skb);
2733 			return -ENOTCONN;
2734 		}
2735 
2736 		l2cap_do_send(chan, skb);
2737 		err = len;
2738 		break;
2739 
2740 	case L2CAP_MODE_ERTM:
2741 	case L2CAP_MODE_STREAMING:
2742 		/* Check outgoing MTU */
2743 		if (len > chan->omtu) {
2744 			err = -EMSGSIZE;
2745 			break;
2746 		}
2747 
2748 		__skb_queue_head_init(&seg_queue);
2749 
2750 		/* Do segmentation before calling in to the state machine,
2751 		 * since it's possible to block while waiting for memory
2752 		 * allocation.
2753 		 */
2754 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2755 
2756 		/* The channel could have been closed while segmenting,
2757 		 * check that it is still connected.
2758 		 */
2759 		if (chan->state != BT_CONNECTED) {
2760 			__skb_queue_purge(&seg_queue);
2761 			err = -ENOTCONN;
2762 		}
2763 
2764 		if (err)
2765 			break;
2766 
2767 		if (chan->mode == L2CAP_MODE_ERTM)
2768 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2769 		else
2770 			l2cap_streaming_send(chan, &seg_queue);
2771 
2772 		err = len;
2773 
2774 		/* If the skbs were not queued for sending, they'll still be in
2775 		 * seg_queue and need to be purged.
2776 		 */
2777 		__skb_queue_purge(&seg_queue);
2778 		break;
2779 
2780 	default:
2781 		BT_DBG("bad state %1.1x", chan->mode);
2782 		err = -EBADFD;
2783 	}
2784 
2785 	return err;
2786 }
2787 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2788 
2789 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2790 {
2791 	struct l2cap_ctrl control;
2792 	u16 seq;
2793 
2794 	BT_DBG("chan %p, txseq %u", chan, txseq);
2795 
2796 	memset(&control, 0, sizeof(control));
2797 	control.sframe = 1;
2798 	control.super = L2CAP_SUPER_SREJ;
2799 
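	/* Send an SREJ for every sequence number between the next expected
	 * frame and the one just received that is not already buffered out
	 * of order in srej_q, remembering each request in srej_list.
	 */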
2800 	for (seq = chan->expected_tx_seq; seq != txseq;
2801 	     seq = __next_seq(chan, seq)) {
2802 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2803 			control.reqseq = seq;
2804 			l2cap_send_sframe(chan, &control);
2805 			l2cap_seq_list_append(&chan->srej_list, seq);
2806 		}
2807 	}
2808 
2809 	chan->expected_tx_seq = __next_seq(chan, txseq);
2810 }
2811 
2812 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2813 {
2814 	struct l2cap_ctrl control;
2815 
2816 	BT_DBG("chan %p", chan);
2817 
2818 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2819 		return;
2820 
2821 	memset(&control, 0, sizeof(control));
2822 	control.sframe = 1;
2823 	control.super = L2CAP_SUPER_SREJ;
2824 	control.reqseq = chan->srej_list.tail;
2825 	l2cap_send_sframe(chan, &control);
2826 }
2827 
2828 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2829 {
2830 	struct l2cap_ctrl control;
2831 	u16 initial_head;
2832 	u16 seq;
2833 
2834 	BT_DBG("chan %p, txseq %u", chan, txseq);
2835 
2836 	memset(&control, 0, sizeof(control));
2837 	control.sframe = 1;
2838 	control.super = L2CAP_SUPER_SREJ;
2839 
2840 	/* Capture initial list head to allow only one pass through the list. */
2841 	initial_head = chan->srej_list.head;
2842 
2843 	do {
2844 		seq = l2cap_seq_list_pop(&chan->srej_list);
2845 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2846 			break;
2847 
2848 		control.reqseq = seq;
2849 		l2cap_send_sframe(chan, &control);
2850 		l2cap_seq_list_append(&chan->srej_list, seq);
2851 	} while (chan->srej_list.head != initial_head);
2852 }
2853 
2854 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2855 {
2856 	struct sk_buff *acked_skb;
2857 	u16 ackseq;
2858 
2859 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2860 
2861 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2862 		return;
2863 
2864 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2865 	       chan->expected_ack_seq, chan->unacked_frames);
2866 
2867 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2868 	     ackseq = __next_seq(chan, ackseq)) {
2869 
2870 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2871 		if (acked_skb) {
2872 			skb_unlink(acked_skb, &chan->tx_q);
2873 			kfree_skb(acked_skb);
2874 			chan->unacked_frames--;
2875 		}
2876 	}
2877 
2878 	chan->expected_ack_seq = reqseq;
2879 
2880 	if (chan->unacked_frames == 0)
2881 		__clear_retrans_timer(chan);
2882 
2883 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2884 }
2885 
2886 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2887 {
2888 	BT_DBG("chan %p", chan);
2889 
2890 	chan->expected_tx_seq = chan->buffer_seq;
2891 	l2cap_seq_list_clear(&chan->srej_list);
2892 	skb_queue_purge(&chan->srej_q);
2893 	chan->rx_state = L2CAP_RX_STATE_RECV;
2894 }
2895 
2896 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2897 				struct l2cap_ctrl *control,
2898 				struct sk_buff_head *skbs, u8 event)
2899 {
2900 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2901 	       event);
2902 
2903 	switch (event) {
2904 	case L2CAP_EV_DATA_REQUEST:
2905 		if (chan->tx_send_head == NULL)
2906 			chan->tx_send_head = skb_peek(skbs);
2907 
2908 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2909 		l2cap_ertm_send(chan);
2910 		break;
2911 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2912 		BT_DBG("Enter LOCAL_BUSY");
2913 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2914 
2915 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2916 			/* The SREJ_SENT state must be aborted if we are to
2917 			 * enter the LOCAL_BUSY state.
2918 			 */
2919 			l2cap_abort_rx_srej_sent(chan);
2920 		}
2921 
2922 		l2cap_send_ack(chan);
2923 
2924 		break;
2925 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2926 		BT_DBG("Exit LOCAL_BUSY");
2927 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2928 
2929 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2930 			struct l2cap_ctrl local_control;
2931 
2932 			memset(&local_control, 0, sizeof(local_control));
2933 			local_control.sframe = 1;
2934 			local_control.super = L2CAP_SUPER_RR;
2935 			local_control.poll = 1;
2936 			local_control.reqseq = chan->buffer_seq;
2937 			l2cap_send_sframe(chan, &local_control);
2938 
2939 			chan->retry_count = 1;
2940 			__set_monitor_timer(chan);
2941 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2942 		}
2943 		break;
2944 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2945 		l2cap_process_reqseq(chan, control->reqseq);
2946 		break;
2947 	case L2CAP_EV_EXPLICIT_POLL:
2948 		l2cap_send_rr_or_rnr(chan, 1);
2949 		chan->retry_count = 1;
2950 		__set_monitor_timer(chan);
2951 		__clear_ack_timer(chan);
2952 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2953 		break;
2954 	case L2CAP_EV_RETRANS_TO:
2955 		l2cap_send_rr_or_rnr(chan, 1);
2956 		chan->retry_count = 1;
2957 		__set_monitor_timer(chan);
2958 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2959 		break;
2960 	case L2CAP_EV_RECV_FBIT:
2961 		/* Nothing to process */
2962 		break;
2963 	default:
2964 		break;
2965 	}
2966 }
2967 
2968 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2969 				  struct l2cap_ctrl *control,
2970 				  struct sk_buff_head *skbs, u8 event)
2971 {
2972 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2973 	       event);
2974 
2975 	switch (event) {
2976 	case L2CAP_EV_DATA_REQUEST:
2977 		if (chan->tx_send_head == NULL)
2978 			chan->tx_send_head = skb_peek(skbs);
2979 		/* Queue data, but don't send. */
2980 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2981 		break;
2982 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2983 		BT_DBG("Enter LOCAL_BUSY");
2984 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2985 
2986 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2987 			/* The SREJ_SENT state must be aborted if we are to
2988 			 * enter the LOCAL_BUSY state.
2989 			 */
2990 			l2cap_abort_rx_srej_sent(chan);
2991 		}
2992 
2993 		l2cap_send_ack(chan);
2994 
2995 		break;
2996 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2997 		BT_DBG("Exit LOCAL_BUSY");
2998 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2999 
3000 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3001 			struct l2cap_ctrl local_control;
3002 			memset(&local_control, 0, sizeof(local_control));
3003 			local_control.sframe = 1;
3004 			local_control.super = L2CAP_SUPER_RR;
3005 			local_control.poll = 1;
3006 			local_control.reqseq = chan->buffer_seq;
3007 			l2cap_send_sframe(chan, &local_control);
3008 
3009 			chan->retry_count = 1;
3010 			__set_monitor_timer(chan);
3011 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3012 		}
3013 		break;
3014 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3015 		l2cap_process_reqseq(chan, control->reqseq);
3016 		fallthrough;
3017 
3018 	case L2CAP_EV_RECV_FBIT:
3019 		if (control && control->final) {
3020 			__clear_monitor_timer(chan);
3021 			if (chan->unacked_frames > 0)
3022 				__set_retrans_timer(chan);
3023 			chan->retry_count = 0;
3024 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3025 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3026 		}
3027 		break;
3028 	case L2CAP_EV_EXPLICIT_POLL:
3029 		/* Ignore */
3030 		break;
3031 	case L2CAP_EV_MONITOR_TO:
3032 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3033 			l2cap_send_rr_or_rnr(chan, 1);
3034 			__set_monitor_timer(chan);
3035 			chan->retry_count++;
3036 		} else {
3037 			l2cap_send_disconn_req(chan, ECONNABORTED);
3038 		}
3039 		break;
3040 	default:
3041 		break;
3042 	}
3043 }
3044 
3045 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3046 		     struct sk_buff_head *skbs, u8 event)
3047 {
3048 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3049 	       chan, control, skbs, event, chan->tx_state);
3050 
3051 	switch (chan->tx_state) {
3052 	case L2CAP_TX_STATE_XMIT:
3053 		l2cap_tx_state_xmit(chan, control, skbs, event);
3054 		break;
3055 	case L2CAP_TX_STATE_WAIT_F:
3056 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3057 		break;
3058 	default:
3059 		/* Ignore event */
3060 		break;
3061 	}
3062 }
3063 
3064 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3065 			     struct l2cap_ctrl *control)
3066 {
3067 	BT_DBG("chan %p, control %p", chan, control);
3068 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3069 }
3070 
3071 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3072 				  struct l2cap_ctrl *control)
3073 {
3074 	BT_DBG("chan %p, control %p", chan, control);
3075 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3076 }
3077 
3078 /* Copy frame to all raw sockets on that connection */
3079 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3080 {
3081 	struct sk_buff *nskb;
3082 	struct l2cap_chan *chan;
3083 
3084 	BT_DBG("conn %p", conn);
3085 
3086 	mutex_lock(&conn->chan_lock);
3087 
3088 	list_for_each_entry(chan, &conn->chan_l, list) {
3089 		if (chan->chan_type != L2CAP_CHAN_RAW)
3090 			continue;
3091 
3092 		/* Don't send frame to the channel it came from */
3093 		if (bt_cb(skb)->l2cap.chan == chan)
3094 			continue;
3095 
3096 		nskb = skb_clone(skb, GFP_KERNEL);
3097 		if (!nskb)
3098 			continue;
3099 		if (chan->ops->recv(chan, nskb))
3100 			kfree_skb(nskb);
3101 	}
3102 
3103 	mutex_unlock(&conn->chan_lock);
3104 }
3105 
3106 /* ---- L2CAP signalling commands ---- */
3107 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3108 				       u8 ident, u16 dlen, void *data)
3109 {
3110 	struct sk_buff *skb, **frag;
3111 	struct l2cap_cmd_hdr *cmd;
3112 	struct l2cap_hdr *lh;
3113 	int len, count;
3114 
3115 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3116 	       conn, code, ident, dlen);
3117 
3118 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3119 		return NULL;
3120 
3121 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3122 	count = min_t(unsigned int, conn->mtu, len);
3123 
3124 	skb = bt_skb_alloc(count, GFP_KERNEL);
3125 	if (!skb)
3126 		return NULL;
3127 
3128 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3129 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3130 
3131 	if (conn->hcon->type == LE_LINK)
3132 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3133 	else
3134 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3135 
3136 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3137 	cmd->code  = code;
3138 	cmd->ident = ident;
3139 	cmd->len   = cpu_to_le16(dlen);
3140 
3141 	if (dlen) {
3142 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3143 		skb_put_data(skb, data, count);
3144 		data += count;
3145 	}
3146 
3147 	len -= skb->len;
3148 
3149 	/* Continuation fragments (no L2CAP header) */
3150 	frag = &skb_shinfo(skb)->frag_list;
3151 	while (len) {
3152 		count = min_t(unsigned int, conn->mtu, len);
3153 
3154 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3155 		if (!*frag)
3156 			goto fail;
3157 
3158 		skb_put_data(*frag, data, count);
3159 
3160 		len  -= count;
3161 		data += count;
3162 
3163 		frag = &(*frag)->next;
3164 	}
3165 
3166 	return skb;
3167 
3168 fail:
3169 	kfree_skb(skb);
3170 	return NULL;
3171 }
3172 
3173 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3174 				     unsigned long *val)
3175 {
3176 	struct l2cap_conf_opt *opt = *ptr;
3177 	int len;
3178 
3179 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3180 	*ptr += len;
3181 
3182 	*type = opt->type;
3183 	*olen = opt->len;
3184 
3185 	switch (opt->len) {
3186 	case 1:
3187 		*val = *((u8 *) opt->val);
3188 		break;
3189 
3190 	case 2:
3191 		*val = get_unaligned_le16(opt->val);
3192 		break;
3193 
3194 	case 4:
3195 		*val = get_unaligned_le32(opt->val);
3196 		break;
3197 
3198 	default:
3199 		*val = (unsigned long) opt->val;
3200 		break;
3201 	}
3202 
3203 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3204 	return len;
3205 }
3206 
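/* Append one configuration option in the L2CAP TLV layout: a one-byte
 * type, a one-byte length and a little-endian value (for example, an MTU
 * option of 672 is emitted as 01 02 a0 02).
 */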
3207 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3208 {
3209 	struct l2cap_conf_opt *opt = *ptr;
3210 
3211 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3212 
3213 	if (size < L2CAP_CONF_OPT_SIZE + len)
3214 		return;
3215 
3216 	opt->type = type;
3217 	opt->len  = len;
3218 
3219 	switch (len) {
3220 	case 1:
3221 		*((u8 *) opt->val)  = val;
3222 		break;
3223 
3224 	case 2:
3225 		put_unaligned_le16(val, opt->val);
3226 		break;
3227 
3228 	case 4:
3229 		put_unaligned_le32(val, opt->val);
3230 		break;
3231 
3232 	default:
3233 		memcpy(opt->val, (void *) val, len);
3234 		break;
3235 	}
3236 
3237 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3238 }
3239 
3240 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3241 {
3242 	struct l2cap_conf_efs efs;
3243 
3244 	switch (chan->mode) {
3245 	case L2CAP_MODE_ERTM:
3246 		efs.id		= chan->local_id;
3247 		efs.stype	= chan->local_stype;
3248 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3249 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3250 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3251 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3252 		break;
3253 
3254 	case L2CAP_MODE_STREAMING:
3255 		efs.id		= 1;
3256 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3257 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3258 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3259 		efs.acc_lat	= 0;
3260 		efs.flush_to	= 0;
3261 		break;
3262 
3263 	default:
3264 		return;
3265 	}
3266 
3267 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3268 			   (unsigned long) &efs, size);
3269 }
3270 
3271 static void l2cap_ack_timeout(struct work_struct *work)
3272 {
3273 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3274 					       ack_timer.work);
3275 	u16 frames_to_ack;
3276 
3277 	BT_DBG("chan %p", chan);
3278 
3279 	l2cap_chan_lock(chan);
3280 
3281 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3282 				     chan->last_acked_seq);
3283 
3284 	if (frames_to_ack)
3285 		l2cap_send_rr_or_rnr(chan, 0);
3286 
3287 	l2cap_chan_unlock(chan);
3288 	l2cap_chan_put(chan);
3289 }
3290 
3291 int l2cap_ertm_init(struct l2cap_chan *chan)
3292 {
3293 	int err;
3294 
3295 	chan->next_tx_seq = 0;
3296 	chan->expected_tx_seq = 0;
3297 	chan->expected_ack_seq = 0;
3298 	chan->unacked_frames = 0;
3299 	chan->buffer_seq = 0;
3300 	chan->frames_sent = 0;
3301 	chan->last_acked_seq = 0;
3302 	chan->sdu = NULL;
3303 	chan->sdu_last_frag = NULL;
3304 	chan->sdu_len = 0;
3305 
3306 	skb_queue_head_init(&chan->tx_q);
3307 
3308 	chan->local_amp_id = AMP_ID_BREDR;
3309 	chan->move_id = AMP_ID_BREDR;
3310 	chan->move_state = L2CAP_MOVE_STABLE;
3311 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3312 
3313 	if (chan->mode != L2CAP_MODE_ERTM)
3314 		return 0;
3315 
3316 	chan->rx_state = L2CAP_RX_STATE_RECV;
3317 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3318 
3319 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3320 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3321 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3322 
3323 	skb_queue_head_init(&chan->srej_q);
3324 
3325 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3326 	if (err < 0)
3327 		return err;
3328 
3329 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3330 	if (err < 0)
3331 		l2cap_seq_list_free(&chan->srej_list);
3332 
3333 	return err;
3334 }
3335 
3336 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3337 {
3338 	switch (mode) {
3339 	case L2CAP_MODE_STREAMING:
3340 	case L2CAP_MODE_ERTM:
3341 		if (l2cap_mode_supported(mode, remote_feat_mask))
3342 			return mode;
3343 		fallthrough;
3344 	default:
3345 		return L2CAP_MODE_BASIC;
3346 	}
3347 }
3348 
3349 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3350 {
3351 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3352 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3353 }
3354 
3355 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3359 }
3360 
3361 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3362 				      struct l2cap_conf_rfc *rfc)
3363 {
3364 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3365 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3366 
3367 		/* Class 1 devices must have ERTM timeouts
3368 		 * exceeding the Link Supervision Timeout.  The
3369 		 * default Link Supervision Timeout for AMP
3370 		 * controllers is 10 seconds.
3371 		 *
3372 		 * Class 1 devices use 0xffffffff for their
3373 		 * best-effort flush timeout, so the clamping logic
3374 		 * will result in a timeout that meets the above
3375 		 * requirement.  ERTM timeouts are 16-bit values, so
3376 		 * the maximum timeout is 65.535 seconds.
3377 		 */
3378 
3379 		/* Convert timeout to milliseconds and round */
3380 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3381 
3382 		/* This is the recommended formula for class 2 devices
3383 		 * that start ERTM timers when packets are sent to the
3384 		 * controller.
3385 		 */
3386 		ertm_to = 3 * ertm_to + 500;
3387 
3388 		if (ertm_to > 0xffff)
3389 			ertm_to = 0xffff;
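		/* For example, a best-effort flush timeout of 0xffffffff
		 * microseconds rounds up to 4294968 ms, so 3 * 4294968 + 500
		 * overflows the 16-bit field and is clamped to 0xffff
		 * (65.535 seconds).
		 */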
3390 
3391 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3392 		rfc->monitor_timeout = rfc->retrans_timeout;
3393 	} else {
3394 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3395 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3396 	}
3397 }
3398 
3399 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3400 {
3401 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3402 	    __l2cap_ews_supported(chan->conn)) {
3403 		/* use extended control field */
3404 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3405 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3406 	} else {
3407 		chan->tx_win = min_t(u16, chan->tx_win,
3408 				     L2CAP_DEFAULT_TX_WINDOW);
3409 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3410 	}
3411 	chan->ack_win = chan->tx_win;
3412 }
3413 
3414 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3415 {
3416 	struct hci_conn *conn = chan->conn->hcon;
3417 
3418 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3419 
3420 	/* The 2-DH1 packet has between 2 and 56 information bytes
3421 	 * (including the 2-byte payload header)
3422 	 */
3423 	if (!(conn->pkt_type & HCI_2DH1))
3424 		chan->imtu = 54;
3425 
3426 	/* The 3-DH1 packet has between 2 and 85 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_3DH1))
3430 		chan->imtu = 83;
3431 
3432 	/* The 2-DH3 packet has between 2 and 369 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_2DH3))
3436 		chan->imtu = 367;
3437 
3438 	/* The 3-DH3 packet has between 2 and 554 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_3DH3))
3442 		chan->imtu = 552;
3443 
3444 	/* The 2-DH5 packet has between 2 and 681 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_2DH5))
3448 		chan->imtu = 679;
3449 
3450 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_3DH5))
3454 		chan->imtu = 1021;
3455 }
3456 
3457 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3458 {
3459 	struct l2cap_conf_req *req = data;
3460 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3461 	void *ptr = req->data;
3462 	void *endptr = data + data_size;
3463 	u16 size;
3464 
3465 	BT_DBG("chan %p", chan);
3466 
3467 	if (chan->num_conf_req || chan->num_conf_rsp)
3468 		goto done;
3469 
3470 	switch (chan->mode) {
3471 	case L2CAP_MODE_STREAMING:
3472 	case L2CAP_MODE_ERTM:
3473 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3474 			break;
3475 
3476 		if (__l2cap_efs_supported(chan->conn))
3477 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3478 
3479 		fallthrough;
3480 	default:
3481 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3482 		break;
3483 	}
3484 
3485 done:
3486 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3487 		if (!chan->imtu)
3488 			l2cap_mtu_auto(chan);
3489 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3490 				   endptr - ptr);
3491 	}
3492 
3493 	switch (chan->mode) {
3494 	case L2CAP_MODE_BASIC:
3495 		if (disable_ertm)
3496 			break;
3497 
3498 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3499 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3500 			break;
3501 
3502 		rfc.mode            = L2CAP_MODE_BASIC;
3503 		rfc.txwin_size      = 0;
3504 		rfc.max_transmit    = 0;
3505 		rfc.retrans_timeout = 0;
3506 		rfc.monitor_timeout = 0;
3507 		rfc.max_pdu_size    = 0;
3508 
3509 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3510 				   (unsigned long) &rfc, endptr - ptr);
3511 		break;
3512 
3513 	case L2CAP_MODE_ERTM:
3514 		rfc.mode            = L2CAP_MODE_ERTM;
3515 		rfc.max_transmit    = chan->max_tx;
3516 
3517 		__l2cap_set_ertm_timeouts(chan, &rfc);
3518 
3519 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3520 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3521 			     L2CAP_FCS_SIZE);
3522 		rfc.max_pdu_size = cpu_to_le16(size);
3523 
3524 		l2cap_txwin_setup(chan);
3525 
3526 		rfc.txwin_size = min_t(u16, chan->tx_win,
3527 				       L2CAP_DEFAULT_TX_WINDOW);
3528 
3529 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3530 				   (unsigned long) &rfc, endptr - ptr);
3531 
3532 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3533 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3534 
3535 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3536 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3537 					   chan->tx_win, endptr - ptr);
3538 
3539 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3540 			if (chan->fcs == L2CAP_FCS_NONE ||
3541 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3542 				chan->fcs = L2CAP_FCS_NONE;
3543 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3544 						   chan->fcs, endptr - ptr);
3545 			}
3546 		break;
3547 
3548 	case L2CAP_MODE_STREAMING:
3549 		l2cap_txwin_setup(chan);
3550 		rfc.mode            = L2CAP_MODE_STREAMING;
3551 		rfc.txwin_size      = 0;
3552 		rfc.max_transmit    = 0;
3553 		rfc.retrans_timeout = 0;
3554 		rfc.monitor_timeout = 0;
3555 
3556 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3557 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3558 			     L2CAP_FCS_SIZE);
3559 		rfc.max_pdu_size = cpu_to_le16(size);
3560 
3561 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3562 				   (unsigned long) &rfc, endptr - ptr);
3563 
3564 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3565 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3566 
3567 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3568 			if (chan->fcs == L2CAP_FCS_NONE ||
3569 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3570 				chan->fcs = L2CAP_FCS_NONE;
3571 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3572 						   chan->fcs, endptr - ptr);
3573 			}
3574 		break;
3575 	}
3576 
3577 	req->dcid  = cpu_to_le16(chan->dcid);
3578 	req->flags = cpu_to_le16(0);
3579 
3580 	return ptr - data;
3581 }
3582 
3583 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3584 {
3585 	struct l2cap_conf_rsp *rsp = data;
3586 	void *ptr = rsp->data;
3587 	void *endptr = data + data_size;
3588 	void *req = chan->conf_req;
3589 	int len = chan->conf_len;
3590 	int type, hint, olen;
3591 	unsigned long val;
3592 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3593 	struct l2cap_conf_efs efs;
3594 	u8 remote_efs = 0;
3595 	u16 mtu = L2CAP_DEFAULT_MTU;
3596 	u16 result = L2CAP_CONF_SUCCESS;
3597 	u16 size;
3598 
3599 	BT_DBG("chan %p", chan);
3600 
3601 	while (len >= L2CAP_CONF_OPT_SIZE) {
3602 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3603 		if (len < 0)
3604 			break;
3605 
3606 		hint  = type & L2CAP_CONF_HINT;
3607 		type &= L2CAP_CONF_MASK;
3608 
3609 		switch (type) {
3610 		case L2CAP_CONF_MTU:
3611 			if (olen != 2)
3612 				break;
3613 			mtu = val;
3614 			break;
3615 
3616 		case L2CAP_CONF_FLUSH_TO:
3617 			if (olen != 2)
3618 				break;
3619 			chan->flush_to = val;
3620 			break;
3621 
3622 		case L2CAP_CONF_QOS:
3623 			break;
3624 
3625 		case L2CAP_CONF_RFC:
3626 			if (olen != sizeof(rfc))
3627 				break;
3628 			memcpy(&rfc, (void *) val, olen);
3629 			break;
3630 
3631 		case L2CAP_CONF_FCS:
3632 			if (olen != 1)
3633 				break;
3634 			if (val == L2CAP_FCS_NONE)
3635 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3636 			break;
3637 
3638 		case L2CAP_CONF_EFS:
3639 			if (olen != sizeof(efs))
3640 				break;
3641 			remote_efs = 1;
3642 			memcpy(&efs, (void *) val, olen);
3643 			break;
3644 
3645 		case L2CAP_CONF_EWS:
3646 			if (olen != 2)
3647 				break;
3648 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3649 				return -ECONNREFUSED;
3650 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3651 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3652 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3653 			chan->remote_tx_win = val;
3654 			break;
3655 
3656 		default:
3657 			if (hint)
3658 				break;
3659 			result = L2CAP_CONF_UNKNOWN;
3660 			*((u8 *) ptr++) = type;
3661 			break;
3662 		}
3663 	}
3664 
3665 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3666 		goto done;
3667 
3668 	switch (chan->mode) {
3669 	case L2CAP_MODE_STREAMING:
3670 	case L2CAP_MODE_ERTM:
3671 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3672 			chan->mode = l2cap_select_mode(rfc.mode,
3673 						       chan->conn->feat_mask);
3674 			break;
3675 		}
3676 
3677 		if (remote_efs) {
3678 			if (__l2cap_efs_supported(chan->conn))
3679 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3680 			else
3681 				return -ECONNREFUSED;
3682 		}
3683 
3684 		if (chan->mode != rfc.mode)
3685 			return -ECONNREFUSED;
3686 
3687 		break;
3688 	}
3689 
3690 done:
3691 	if (chan->mode != rfc.mode) {
3692 		result = L2CAP_CONF_UNACCEPT;
3693 		rfc.mode = chan->mode;
3694 
3695 		if (chan->num_conf_rsp == 1)
3696 			return -ECONNREFUSED;
3697 
3698 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3699 				   (unsigned long) &rfc, endptr - ptr);
3700 	}
3701 
3702 	if (result == L2CAP_CONF_SUCCESS) {
3703 		/* Configure output options and let the other side know
3704 		 * which ones we don't like. */
3705 
3706 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3707 			result = L2CAP_CONF_UNACCEPT;
3708 		else {
3709 			chan->omtu = mtu;
3710 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3711 		}
3712 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3713 
3714 		if (remote_efs) {
3715 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3716 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3717 			    efs.stype != chan->local_stype) {
3718 
3719 				result = L2CAP_CONF_UNACCEPT;
3720 
3721 				if (chan->num_conf_req >= 1)
3722 					return -ECONNREFUSED;
3723 
3724 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3725 						   sizeof(efs),
3726 						   (unsigned long) &efs, endptr - ptr);
3727 			} else {
3728 				/* Send PENDING Conf Rsp */
3729 				result = L2CAP_CONF_PENDING;
3730 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3731 			}
3732 		}
3733 
3734 		switch (rfc.mode) {
3735 		case L2CAP_MODE_BASIC:
3736 			chan->fcs = L2CAP_FCS_NONE;
3737 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3738 			break;
3739 
3740 		case L2CAP_MODE_ERTM:
3741 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3742 				chan->remote_tx_win = rfc.txwin_size;
3743 			else
3744 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3745 
3746 			chan->remote_max_tx = rfc.max_transmit;
3747 
3748 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3749 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3750 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3751 			rfc.max_pdu_size = cpu_to_le16(size);
3752 			chan->remote_mps = size;
3753 
3754 			__l2cap_set_ertm_timeouts(chan, &rfc);
3755 
3756 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3757 
3758 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3759 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3760 
3761 			if (remote_efs &&
3762 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3763 				chan->remote_id = efs.id;
3764 				chan->remote_stype = efs.stype;
3765 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3766 				chan->remote_flush_to =
3767 					le32_to_cpu(efs.flush_to);
3768 				chan->remote_acc_lat =
3769 					le32_to_cpu(efs.acc_lat);
3770 				chan->remote_sdu_itime =
3771 					le32_to_cpu(efs.sdu_itime);
3772 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3773 						   sizeof(efs),
3774 						   (unsigned long) &efs, endptr - ptr);
3775 			}
3776 			break;
3777 
3778 		case L2CAP_MODE_STREAMING:
3779 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3780 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3781 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3782 			rfc.max_pdu_size = cpu_to_le16(size);
3783 			chan->remote_mps = size;
3784 
3785 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3786 
3787 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3788 					   (unsigned long) &rfc, endptr - ptr);
3789 
3790 			break;
3791 
3792 		default:
3793 			result = L2CAP_CONF_UNACCEPT;
3794 
3795 			memset(&rfc, 0, sizeof(rfc));
3796 			rfc.mode = chan->mode;
3797 		}
3798 
3799 		if (result == L2CAP_CONF_SUCCESS)
3800 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3801 	}
3802 	rsp->scid   = cpu_to_le16(chan->dcid);
3803 	rsp->result = cpu_to_le16(result);
3804 	rsp->flags  = cpu_to_le16(0);
3805 
3806 	return ptr - data;
3807 }
3808 
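/* Parse a Configuration Response from the peer and build a new
 * Configuration Request in 'data', adjusting MTU, flush timeout, RFC
 * and EFS parameters to what the remote proposed.  Returns the length
 * of the new request, or -ECONNREFUSED if the response is unacceptable.
 */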
3809 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3810 				void *data, size_t size, u16 *result)
3811 {
3812 	struct l2cap_conf_req *req = data;
3813 	void *ptr = req->data;
3814 	void *endptr = data + size;
3815 	int type, olen;
3816 	unsigned long val;
3817 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3818 	struct l2cap_conf_efs efs;
3819 
3820 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3821 
3822 	while (len >= L2CAP_CONF_OPT_SIZE) {
3823 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3824 		if (len < 0)
3825 			break;
3826 
3827 		switch (type) {
3828 		case L2CAP_CONF_MTU:
3829 			if (olen != 2)
3830 				break;
3831 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3832 				*result = L2CAP_CONF_UNACCEPT;
3833 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3834 			} else
3835 				chan->imtu = val;
3836 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3837 					   endptr - ptr);
3838 			break;
3839 
3840 		case L2CAP_CONF_FLUSH_TO:
3841 			if (olen != 2)
3842 				break;
3843 			chan->flush_to = val;
3844 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3845 					   chan->flush_to, endptr - ptr);
3846 			break;
3847 
3848 		case L2CAP_CONF_RFC:
3849 			if (olen != sizeof(rfc))
3850 				break;
3851 			memcpy(&rfc, (void *)val, olen);
3852 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3853 			    rfc.mode != chan->mode)
3854 				return -ECONNREFUSED;
3855 			chan->fcs = 0;
3856 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3857 					   (unsigned long) &rfc, endptr - ptr);
3858 			break;
3859 
3860 		case L2CAP_CONF_EWS:
3861 			if (olen != 2)
3862 				break;
3863 			chan->ack_win = min_t(u16, val, chan->ack_win);
3864 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3865 					   chan->tx_win, endptr - ptr);
3866 			break;
3867 
3868 		case L2CAP_CONF_EFS:
3869 			if (olen != sizeof(efs))
3870 				break;
3871 			memcpy(&efs, (void *)val, olen);
3872 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3873 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3874 			    efs.stype != chan->local_stype)
3875 				return -ECONNREFUSED;
3876 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3877 					   (unsigned long) &efs, endptr - ptr);
3878 			break;
3879 
3880 		case L2CAP_CONF_FCS:
3881 			if (olen != 1)
3882 				break;
3883 			if (*result == L2CAP_CONF_PENDING)
3884 				if (val == L2CAP_FCS_NONE)
3885 					set_bit(CONF_RECV_NO_FCS,
3886 						&chan->conf_state);
3887 			break;
3888 		}
3889 	}
3890 
3891 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3892 		return -ECONNREFUSED;
3893 
3894 	chan->mode = rfc.mode;
3895 
3896 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3897 		switch (rfc.mode) {
3898 		case L2CAP_MODE_ERTM:
3899 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3900 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3901 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3902 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3903 				chan->ack_win = min_t(u16, chan->ack_win,
3904 						      rfc.txwin_size);
3905 
3906 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3907 				chan->local_msdu = le16_to_cpu(efs.msdu);
3908 				chan->local_sdu_itime =
3909 					le32_to_cpu(efs.sdu_itime);
3910 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3911 				chan->local_flush_to =
3912 					le32_to_cpu(efs.flush_to);
3913 			}
3914 			break;
3915 
3916 		case L2CAP_MODE_STREAMING:
3917 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3918 		}
3919 	}
3920 
3921 	req->dcid   = cpu_to_le16(chan->dcid);
3922 	req->flags  = cpu_to_le16(0);
3923 
3924 	return ptr - data;
3925 }
3926 
3927 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3928 				u16 result, u16 flags)
3929 {
3930 	struct l2cap_conf_rsp *rsp = data;
3931 	void *ptr = rsp->data;
3932 
3933 	BT_DBG("chan %p", chan);
3934 
3935 	rsp->scid   = cpu_to_le16(chan->dcid);
3936 	rsp->result = cpu_to_le16(result);
3937 	rsp->flags  = cpu_to_le16(flags);
3938 
3939 	return ptr - data;
3940 }
3941 
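/* Send a deferred LE Credit Based connection response using the
 * channel's stored ident.
 */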
3942 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3943 {
3944 	struct l2cap_le_conn_rsp rsp;
3945 	struct l2cap_conn *conn = chan->conn;
3946 
3947 	BT_DBG("chan %p", chan);
3948 
3949 	rsp.dcid    = cpu_to_le16(chan->scid);
3950 	rsp.mtu     = cpu_to_le16(chan->imtu);
3951 	rsp.mps     = cpu_to_le16(chan->mps);
3952 	rsp.credits = cpu_to_le16(chan->rx_credits);
3953 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3954 
3955 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3956 		       &rsp);
3957 }
3958 
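/* Send a deferred Enhanced Credit Based connection response.  All
 * channels created by the same L2CAP_ECRED_CONN_REQ share one ident,
 * so a single response carries the DCIDs of every pending channel.
 */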
3959 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3960 {
3961 	struct {
3962 		struct l2cap_ecred_conn_rsp rsp;
3963 		__le16 dcid[5];
3964 	} __packed pdu;
3965 	struct l2cap_conn *conn = chan->conn;
3966 	u16 ident = chan->ident;
3967 	int i = 0;
3968 
3969 	if (!ident)
3970 		return;
3971 
3972 	BT_DBG("chan %p ident %d", chan, ident);
3973 
3974 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3975 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3976 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3977 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3978 
3979 	mutex_lock(&conn->chan_lock);
3980 
3981 	list_for_each_entry(chan, &conn->chan_l, list) {
3982 		if (chan->ident != ident)
3983 			continue;
3984 
3985 		/* Reset ident so only one response is sent */
3986 		chan->ident = 0;
3987 
3988 		/* Include all channels pending with the same ident */
3989 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3990 	}
3991 
3992 	mutex_unlock(&conn->chan_lock);
3993 
3994 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3995 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3996 }
3997 
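/* Send a deferred BR/EDR connection response (or a Create Channel
 * response when a high-speed link is involved) and, if not already
 * done, start configuration by sending the first Conf Req.
 */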
3998 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3999 {
4000 	struct l2cap_conn_rsp rsp;
4001 	struct l2cap_conn *conn = chan->conn;
4002 	u8 buf[128];
4003 	u8 rsp_code;
4004 
4005 	rsp.scid   = cpu_to_le16(chan->dcid);
4006 	rsp.dcid   = cpu_to_le16(chan->scid);
4007 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4008 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4009 
4010 	if (chan->hs_hcon)
4011 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4012 	else
4013 		rsp_code = L2CAP_CONN_RSP;
4014 
4015 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4016 
4017 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4018 
4019 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4020 		return;
4021 
4022 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4023 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4024 	chan->num_conf_req++;
4025 }
4026 
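/* Extract the RFC and extended window size options from a successful
 * Configuration Response and apply them to the channel, falling back
 * to sane defaults if the remote omitted them.
 */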
4027 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4028 {
4029 	int type, olen;
4030 	unsigned long val;
4031 	/* Use sane default values in case a misbehaving remote device
4032 	 * did not send an RFC or extended window size option.
4033 	 */
4034 	u16 txwin_ext = chan->ack_win;
4035 	struct l2cap_conf_rfc rfc = {
4036 		.mode = chan->mode,
4037 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4038 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4039 		.max_pdu_size = cpu_to_le16(chan->imtu),
4040 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4041 	};
4042 
4043 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4044 
4045 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4046 		return;
4047 
4048 	while (len >= L2CAP_CONF_OPT_SIZE) {
4049 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4050 		if (len < 0)
4051 			break;
4052 
4053 		switch (type) {
4054 		case L2CAP_CONF_RFC:
4055 			if (olen != sizeof(rfc))
4056 				break;
4057 			memcpy(&rfc, (void *)val, olen);
4058 			break;
4059 		case L2CAP_CONF_EWS:
4060 			if (olen != 2)
4061 				break;
4062 			txwin_ext = val;
4063 			break;
4064 		}
4065 	}
4066 
4067 	switch (rfc.mode) {
4068 	case L2CAP_MODE_ERTM:
4069 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4070 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4071 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4072 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4073 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4074 		else
4075 			chan->ack_win = min_t(u16, chan->ack_win,
4076 					      rfc.txwin_size);
4077 		break;
4078 	case L2CAP_MODE_STREAMING:
4079 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4080 	}
4081 }
4082 
4083 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4084 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4085 				    u8 *data)
4086 {
4087 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4088 
4089 	if (cmd_len < sizeof(*rej))
4090 		return -EPROTO;
4091 
4092 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4093 		return 0;
4094 
4095 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4096 	    cmd->ident == conn->info_ident) {
4097 		cancel_delayed_work(&conn->info_timer);
4098 
4099 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4100 		conn->info_ident = 0;
4101 
4102 		l2cap_conn_start(conn);
4103 	}
4104 
4105 	return 0;
4106 }
4107 
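/* Handle an incoming Connection Request (or Create Channel Request):
 * look up the listening channel for the PSM, perform security and CID
 * sanity checks, create the new channel and send the response, kicking
 * off an information request when the remote feature mask is not yet
 * known.
 */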
4108 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4109 					struct l2cap_cmd_hdr *cmd,
4110 					u8 *data, u8 rsp_code, u8 amp_id)
4111 {
4112 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4113 	struct l2cap_conn_rsp rsp;
4114 	struct l2cap_chan *chan = NULL, *pchan;
4115 	int result, status = L2CAP_CS_NO_INFO;
4116 
4117 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4118 	__le16 psm = req->psm;
4119 
4120 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4121 
4122 	/* Check if we have a socket listening on this psm */
4123 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4124 					 &conn->hcon->dst, ACL_LINK);
4125 	if (!pchan) {
4126 		result = L2CAP_CR_BAD_PSM;
4127 		goto sendresp;
4128 	}
4129 
4130 	mutex_lock(&conn->chan_lock);
4131 	l2cap_chan_lock(pchan);
4132 
4133 	/* Check if the ACL is secure enough (if not SDP) */
4134 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4135 	    !hci_conn_check_link_mode(conn->hcon)) {
4136 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4137 		result = L2CAP_CR_SEC_BLOCK;
4138 		goto response;
4139 	}
4140 
4141 	result = L2CAP_CR_NO_MEM;
4142 
4143 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4144 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4145 		result = L2CAP_CR_INVALID_SCID;
4146 		goto response;
4147 	}
4148 
4149 	/* Check if we already have a channel with that dcid */
4150 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4151 		result = L2CAP_CR_SCID_IN_USE;
4152 		goto response;
4153 	}
4154 
4155 	chan = pchan->ops->new_connection(pchan);
4156 	if (!chan)
4157 		goto response;
4158 
4159 	/* For certain devices (e.g. a HID mouse), support for authentication,
4160 	 * pairing and bonding is optional. For such devices, in order to avoid
4161 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4162 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4163 	 */
4164 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4165 
4166 	bacpy(&chan->src, &conn->hcon->src);
4167 	bacpy(&chan->dst, &conn->hcon->dst);
4168 	chan->src_type = bdaddr_src_type(conn->hcon);
4169 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4170 	chan->psm  = psm;
4171 	chan->dcid = scid;
4172 	chan->local_amp_id = amp_id;
4173 
4174 	__l2cap_chan_add(conn, chan);
4175 
4176 	dcid = chan->scid;
4177 
4178 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4179 
4180 	chan->ident = cmd->ident;
4181 
4182 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4183 		if (l2cap_chan_check_security(chan, false)) {
4184 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4185 				l2cap_state_change(chan, BT_CONNECT2);
4186 				result = L2CAP_CR_PEND;
4187 				status = L2CAP_CS_AUTHOR_PEND;
4188 				chan->ops->defer(chan);
4189 			} else {
4190 				/* Force pending result for AMP controllers.
4191 				 * The connection will succeed after the
4192 				 * physical link is up.
4193 				 */
4194 				if (amp_id == AMP_ID_BREDR) {
4195 					l2cap_state_change(chan, BT_CONFIG);
4196 					result = L2CAP_CR_SUCCESS;
4197 				} else {
4198 					l2cap_state_change(chan, BT_CONNECT2);
4199 					result = L2CAP_CR_PEND;
4200 				}
4201 				status = L2CAP_CS_NO_INFO;
4202 			}
4203 		} else {
4204 			l2cap_state_change(chan, BT_CONNECT2);
4205 			result = L2CAP_CR_PEND;
4206 			status = L2CAP_CS_AUTHEN_PEND;
4207 		}
4208 	} else {
4209 		l2cap_state_change(chan, BT_CONNECT2);
4210 		result = L2CAP_CR_PEND;
4211 		status = L2CAP_CS_NO_INFO;
4212 	}
4213 
4214 response:
4215 	l2cap_chan_unlock(pchan);
4216 	mutex_unlock(&conn->chan_lock);
4217 	l2cap_chan_put(pchan);
4218 
4219 sendresp:
4220 	rsp.scid   = cpu_to_le16(scid);
4221 	rsp.dcid   = cpu_to_le16(dcid);
4222 	rsp.result = cpu_to_le16(result);
4223 	rsp.status = cpu_to_le16(status);
4224 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4225 
4226 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4227 		struct l2cap_info_req info;
4228 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4229 
4230 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4231 		conn->info_ident = l2cap_get_ident(conn);
4232 
4233 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4234 
4235 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4236 			       sizeof(info), &info);
4237 	}
4238 
4239 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4240 	    result == L2CAP_CR_SUCCESS) {
4241 		u8 buf[128];
4242 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4243 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4244 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4245 		chan->num_conf_req++;
4246 	}
4247 
4248 	return chan;
4249 }
4250 
4251 static int l2cap_connect_req(struct l2cap_conn *conn,
4252 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4253 {
4254 	struct hci_dev *hdev = conn->hcon->hdev;
4255 	struct hci_conn *hcon = conn->hcon;
4256 
4257 	if (cmd_len < sizeof(struct l2cap_conn_req))
4258 		return -EPROTO;
4259 
4260 	hci_dev_lock(hdev);
4261 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4262 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4263 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4264 	hci_dev_unlock(hdev);
4265 
4266 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4267 	return 0;
4268 }
4269 
4270 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4271 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4272 				    u8 *data)
4273 {
4274 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4275 	u16 scid, dcid, result, status;
4276 	struct l2cap_chan *chan;
4277 	u8 req[128];
4278 	int err;
4279 
4280 	if (cmd_len < sizeof(*rsp))
4281 		return -EPROTO;
4282 
4283 	scid   = __le16_to_cpu(rsp->scid);
4284 	dcid   = __le16_to_cpu(rsp->dcid);
4285 	result = __le16_to_cpu(rsp->result);
4286 	status = __le16_to_cpu(rsp->status);
4287 
4288 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4289 	       dcid, scid, result, status);
4290 
4291 	mutex_lock(&conn->chan_lock);
4292 
4293 	if (scid) {
4294 		chan = __l2cap_get_chan_by_scid(conn, scid);
4295 		if (!chan) {
4296 			err = -EBADSLT;
4297 			goto unlock;
4298 		}
4299 	} else {
4300 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4301 		if (!chan) {
4302 			err = -EBADSLT;
4303 			goto unlock;
4304 		}
4305 	}
4306 
4307 	err = 0;
4308 
4309 	l2cap_chan_lock(chan);
4310 
4311 	switch (result) {
4312 	case L2CAP_CR_SUCCESS:
4313 		l2cap_state_change(chan, BT_CONFIG);
4314 		chan->ident = 0;
4315 		chan->dcid = dcid;
4316 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4317 
4318 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4319 			break;
4320 
4321 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4322 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4323 		chan->num_conf_req++;
4324 		break;
4325 
4326 	case L2CAP_CR_PEND:
4327 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4328 		break;
4329 
4330 	default:
4331 		l2cap_chan_del(chan, ECONNREFUSED);
4332 		break;
4333 	}
4334 
4335 	l2cap_chan_unlock(chan);
4336 
4337 unlock:
4338 	mutex_unlock(&conn->chan_lock);
4339 
4340 	return err;
4341 }
4342 
4343 static inline void set_default_fcs(struct l2cap_chan *chan)
4344 {
4345 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4346 	 * sides request it.
4347 	 */
4348 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4349 		chan->fcs = L2CAP_FCS_NONE;
4350 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4351 		chan->fcs = L2CAP_FCS_CRC16;
4352 }
4353 
4354 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4355 				    u8 ident, u16 flags)
4356 {
4357 	struct l2cap_conn *conn = chan->conn;
4358 
4359 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4360 	       flags);
4361 
4362 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4363 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4364 
4365 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4366 		       l2cap_build_conf_rsp(chan, data,
4367 					    L2CAP_CONF_SUCCESS, flags), data);
4368 }
4369 
4370 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4371 				   u16 scid, u16 dcid)
4372 {
4373 	struct l2cap_cmd_rej_cid rej;
4374 
4375 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4376 	rej.scid = __cpu_to_le16(scid);
4377 	rej.dcid = __cpu_to_le16(dcid);
4378 
4379 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4380 }
4381 
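/* Handle a Configuration Request: accumulate option data across
 * continuation packets, then parse the complete request, send the
 * response and, once both directions are configured, initialize ERTM
 * state and mark the channel ready.
 */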
4382 static inline int l2cap_config_req(struct l2cap_conn *conn,
4383 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4384 				   u8 *data)
4385 {
4386 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4387 	u16 dcid, flags;
4388 	u8 rsp[64];
4389 	struct l2cap_chan *chan;
4390 	int len, err = 0;
4391 
4392 	if (cmd_len < sizeof(*req))
4393 		return -EPROTO;
4394 
4395 	dcid  = __le16_to_cpu(req->dcid);
4396 	flags = __le16_to_cpu(req->flags);
4397 
4398 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4399 
4400 	chan = l2cap_get_chan_by_scid(conn, dcid);
4401 	if (!chan) {
4402 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4403 		return 0;
4404 	}
4405 
4406 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4407 	    chan->state != BT_CONNECTED) {
4408 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4409 				       chan->dcid);
4410 		goto unlock;
4411 	}
4412 
4413 	/* Reject if config buffer is too small. */
4414 	len = cmd_len - sizeof(*req);
4415 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4416 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4417 			       l2cap_build_conf_rsp(chan, rsp,
4418 			       L2CAP_CONF_REJECT, flags), rsp);
4419 		goto unlock;
4420 	}
4421 
4422 	/* Store config. */
4423 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4424 	chan->conf_len += len;
4425 
4426 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4427 		/* Incomplete config. Send empty response. */
4428 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4429 			       l2cap_build_conf_rsp(chan, rsp,
4430 			       L2CAP_CONF_SUCCESS, flags), rsp);
4431 		goto unlock;
4432 	}
4433 
4434 	/* Complete config. */
4435 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4436 	if (len < 0) {
4437 		l2cap_send_disconn_req(chan, ECONNRESET);
4438 		goto unlock;
4439 	}
4440 
4441 	chan->ident = cmd->ident;
4442 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4443 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4444 		chan->num_conf_rsp++;
4445 
4446 	/* Reset config buffer. */
4447 	chan->conf_len = 0;
4448 
4449 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4450 		goto unlock;
4451 
4452 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4453 		set_default_fcs(chan);
4454 
4455 		if (chan->mode == L2CAP_MODE_ERTM ||
4456 		    chan->mode == L2CAP_MODE_STREAMING)
4457 			err = l2cap_ertm_init(chan);
4458 
4459 		if (err < 0)
4460 			l2cap_send_disconn_req(chan, -err);
4461 		else
4462 			l2cap_chan_ready(chan);
4463 
4464 		goto unlock;
4465 	}
4466 
4467 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4468 		u8 buf[64];
4469 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4470 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4471 		chan->num_conf_req++;
4472 	}
4473 
4474 	/* Got a Conf Rsp with the PENDING result from the remote side and
4475 	 * assume we sent our own PENDING Conf Rsp in the code above. */
4476 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4477 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4478 
4479 		/* check compatibility */
4480 
4481 		/* Send rsp for BR/EDR channel */
4482 		if (!chan->hs_hcon)
4483 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4484 		else
4485 			chan->ident = cmd->ident;
4486 	}
4487 
4488 unlock:
4489 	l2cap_chan_unlock(chan);
4490 	l2cap_chan_put(chan);
4491 	return err;
4492 }
4493 
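/* Handle a Configuration Response.  A successful result applies the
 * negotiated RFC options, a pending result defers completion, an
 * unacceptable-parameters result triggers re-negotiation (up to
 * L2CAP_CONF_MAX_CONF_RSP attempts), and anything else disconnects the
 * channel.
 */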
4494 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4495 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4496 				   u8 *data)
4497 {
4498 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4499 	u16 scid, flags, result;
4500 	struct l2cap_chan *chan;
4501 	int len = cmd_len - sizeof(*rsp);
4502 	int err = 0;
4503 
4504 	if (cmd_len < sizeof(*rsp))
4505 		return -EPROTO;
4506 
4507 	scid   = __le16_to_cpu(rsp->scid);
4508 	flags  = __le16_to_cpu(rsp->flags);
4509 	result = __le16_to_cpu(rsp->result);
4510 
4511 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4512 	       result, len);
4513 
4514 	chan = l2cap_get_chan_by_scid(conn, scid);
4515 	if (!chan)
4516 		return 0;
4517 
4518 	switch (result) {
4519 	case L2CAP_CONF_SUCCESS:
4520 		l2cap_conf_rfc_get(chan, rsp->data, len);
4521 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4522 		break;
4523 
4524 	case L2CAP_CONF_PENDING:
4525 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4526 
4527 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4528 			char buf[64];
4529 
4530 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4531 						   buf, sizeof(buf), &result);
4532 			if (len < 0) {
4533 				l2cap_send_disconn_req(chan, ECONNRESET);
4534 				goto done;
4535 			}
4536 
4537 			if (!chan->hs_hcon) {
4538 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4539 							0);
4540 			} else {
4541 				if (l2cap_check_efs(chan)) {
4542 					amp_create_logical_link(chan);
4543 					chan->ident = cmd->ident;
4544 				}
4545 			}
4546 		}
4547 		goto done;
4548 
4549 	case L2CAP_CONF_UNACCEPT:
4550 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4551 			char req[64];
4552 
4553 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4554 				l2cap_send_disconn_req(chan, ECONNRESET);
4555 				goto done;
4556 			}
4557 
4558 			/* throw out any old stored conf requests */
4559 			result = L2CAP_CONF_SUCCESS;
4560 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4561 						   req, sizeof(req), &result);
4562 			if (len < 0) {
4563 				l2cap_send_disconn_req(chan, ECONNRESET);
4564 				goto done;
4565 			}
4566 
4567 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4568 				       L2CAP_CONF_REQ, len, req);
4569 			chan->num_conf_req++;
4570 			if (result != L2CAP_CONF_SUCCESS)
4571 				goto done;
4572 			break;
4573 		}
4574 		fallthrough;
4575 
4576 	default:
4577 		l2cap_chan_set_err(chan, ECONNRESET);
4578 
4579 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4580 		l2cap_send_disconn_req(chan, ECONNRESET);
4581 		goto done;
4582 	}
4583 
4584 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4585 		goto done;
4586 
4587 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4588 
4589 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4590 		set_default_fcs(chan);
4591 
4592 		if (chan->mode == L2CAP_MODE_ERTM ||
4593 		    chan->mode == L2CAP_MODE_STREAMING)
4594 			err = l2cap_ertm_init(chan);
4595 
4596 		if (err < 0)
4597 			l2cap_send_disconn_req(chan, -err);
4598 		else
4599 			l2cap_chan_ready(chan);
4600 	}
4601 
4602 done:
4603 	l2cap_chan_unlock(chan);
4604 	l2cap_chan_put(chan);
4605 	return err;
4606 }
4607 
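/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the matching channel down.
 */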
4608 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4609 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4610 				       u8 *data)
4611 {
4612 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4613 	struct l2cap_disconn_rsp rsp;
4614 	u16 dcid, scid;
4615 	struct l2cap_chan *chan;
4616 
4617 	if (cmd_len != sizeof(*req))
4618 		return -EPROTO;
4619 
4620 	scid = __le16_to_cpu(req->scid);
4621 	dcid = __le16_to_cpu(req->dcid);
4622 
4623 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4624 
4625 	mutex_lock(&conn->chan_lock);
4626 
4627 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4628 	if (!chan) {
4629 		mutex_unlock(&conn->chan_lock);
4630 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4631 		return 0;
4632 	}
4633 
4634 	l2cap_chan_hold(chan);
4635 	l2cap_chan_lock(chan);
4636 
4637 	rsp.dcid = cpu_to_le16(chan->scid);
4638 	rsp.scid = cpu_to_le16(chan->dcid);
4639 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4640 
4641 	chan->ops->set_shutdown(chan);
4642 
4643 	l2cap_chan_del(chan, ECONNRESET);
4644 
4645 	chan->ops->close(chan);
4646 
4647 	l2cap_chan_unlock(chan);
4648 	l2cap_chan_put(chan);
4649 
4650 	mutex_unlock(&conn->chan_lock);
4651 
4652 	return 0;
4653 }
4654 
4655 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4656 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4657 				       u8 *data)
4658 {
4659 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4660 	u16 dcid, scid;
4661 	struct l2cap_chan *chan;
4662 
4663 	if (cmd_len != sizeof(*rsp))
4664 		return -EPROTO;
4665 
4666 	scid = __le16_to_cpu(rsp->scid);
4667 	dcid = __le16_to_cpu(rsp->dcid);
4668 
4669 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4670 
4671 	mutex_lock(&conn->chan_lock);
4672 
4673 	chan = __l2cap_get_chan_by_scid(conn, scid);
4674 	if (!chan) {
4675 		mutex_unlock(&conn->chan_lock);
4676 		return 0;
4677 	}
4678 
4679 	l2cap_chan_hold(chan);
4680 	l2cap_chan_lock(chan);
4681 
4682 	if (chan->state != BT_DISCONN) {
4683 		l2cap_chan_unlock(chan);
4684 		l2cap_chan_put(chan);
4685 		mutex_unlock(&conn->chan_lock);
4686 		return 0;
4687 	}
4688 
4689 	l2cap_chan_del(chan, 0);
4690 
4691 	chan->ops->close(chan);
4692 
4693 	l2cap_chan_unlock(chan);
4694 	l2cap_chan_put(chan);
4695 
4696 	mutex_unlock(&conn->chan_lock);
4697 
4698 	return 0;
4699 }
4700 
4701 static inline int l2cap_information_req(struct l2cap_conn *conn,
4702 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4703 					u8 *data)
4704 {
4705 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4706 	u16 type;
4707 
4708 	if (cmd_len != sizeof(*req))
4709 		return -EPROTO;
4710 
4711 	type = __le16_to_cpu(req->type);
4712 
4713 	BT_DBG("type 0x%4.4x", type);
4714 
4715 	if (type == L2CAP_IT_FEAT_MASK) {
4716 		u8 buf[8];
4717 		u32 feat_mask = l2cap_feat_mask;
4718 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4719 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4720 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4721 		if (!disable_ertm)
4722 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4723 				| L2CAP_FEAT_FCS;
4724 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4725 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4726 				| L2CAP_FEAT_EXT_WINDOW;
4727 
4728 		put_unaligned_le32(feat_mask, rsp->data);
4729 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4730 			       buf);
4731 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4732 		u8 buf[12];
4733 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4734 
4735 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4736 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4737 		rsp->data[0] = conn->local_fixed_chan;
4738 		memset(rsp->data + 1, 0, 7);
4739 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4740 			       buf);
4741 	} else {
4742 		struct l2cap_info_rsp rsp;
4743 		rsp.type   = cpu_to_le16(type);
4744 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4745 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4746 			       &rsp);
4747 	}
4748 
4749 	return 0;
4750 }
4751 
4752 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4753 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4754 					u8 *data)
4755 {
4756 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4757 	u16 type, result;
4758 
4759 	if (cmd_len < sizeof(*rsp))
4760 		return -EPROTO;
4761 
4762 	type   = __le16_to_cpu(rsp->type);
4763 	result = __le16_to_cpu(rsp->result);
4764 
4765 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4766 
4767 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4768 	if (cmd->ident != conn->info_ident ||
4769 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4770 		return 0;
4771 
4772 	cancel_delayed_work(&conn->info_timer);
4773 
4774 	if (result != L2CAP_IR_SUCCESS) {
4775 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4776 		conn->info_ident = 0;
4777 
4778 		l2cap_conn_start(conn);
4779 
4780 		return 0;
4781 	}
4782 
4783 	switch (type) {
4784 	case L2CAP_IT_FEAT_MASK:
4785 		conn->feat_mask = get_unaligned_le32(rsp->data);
4786 
4787 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4788 			struct l2cap_info_req req;
4789 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4790 
4791 			conn->info_ident = l2cap_get_ident(conn);
4792 
4793 			l2cap_send_cmd(conn, conn->info_ident,
4794 				       L2CAP_INFO_REQ, sizeof(req), &req);
4795 		} else {
4796 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4797 			conn->info_ident = 0;
4798 
4799 			l2cap_conn_start(conn);
4800 		}
4801 		break;
4802 
4803 	case L2CAP_IT_FIXED_CHAN:
4804 		conn->remote_fixed_chan = rsp->data[0];
4805 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4806 		conn->info_ident = 0;
4807 
4808 		l2cap_conn_start(conn);
4809 		break;
4810 	}
4811 
4812 	return 0;
4813 }
4814 
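/* Handle a Create Channel Request (AMP).  Controller id 0 falls back
 * to a normal BR/EDR connection; otherwise the AMP controller id is
 * validated and the new channel is bound to the high-speed link.
 */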
4815 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4816 				    struct l2cap_cmd_hdr *cmd,
4817 				    u16 cmd_len, void *data)
4818 {
4819 	struct l2cap_create_chan_req *req = data;
4820 	struct l2cap_create_chan_rsp rsp;
4821 	struct l2cap_chan *chan;
4822 	struct hci_dev *hdev;
4823 	u16 psm, scid;
4824 
4825 	if (cmd_len != sizeof(*req))
4826 		return -EPROTO;
4827 
4828 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4829 		return -EINVAL;
4830 
4831 	psm = le16_to_cpu(req->psm);
4832 	scid = le16_to_cpu(req->scid);
4833 
4834 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4835 
4836 	/* For controller id 0 make BR/EDR connection */
4837 	if (req->amp_id == AMP_ID_BREDR) {
4838 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4839 			      req->amp_id);
4840 		return 0;
4841 	}
4842 
4843 	/* Validate AMP controller id */
4844 	hdev = hci_dev_get(req->amp_id);
4845 	if (!hdev)
4846 		goto error;
4847 
4848 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4849 		hci_dev_put(hdev);
4850 		goto error;
4851 	}
4852 
4853 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4854 			     req->amp_id);
4855 	if (chan) {
4856 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4857 		struct hci_conn *hs_hcon;
4858 
4859 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4860 						  &conn->hcon->dst);
4861 		if (!hs_hcon) {
4862 			hci_dev_put(hdev);
4863 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4864 					       chan->dcid);
4865 			return 0;
4866 		}
4867 
4868 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4869 
4870 		mgr->bredr_chan = chan;
4871 		chan->hs_hcon = hs_hcon;
4872 		chan->fcs = L2CAP_FCS_NONE;
4873 		conn->mtu = hdev->block_mtu;
4874 	}
4875 
4876 	hci_dev_put(hdev);
4877 
4878 	return 0;
4879 
4880 error:
4881 	rsp.dcid = 0;
4882 	rsp.scid = cpu_to_le16(scid);
4883 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4884 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4885 
4886 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4887 		       sizeof(rsp), &rsp);
4888 
4889 	return 0;
4890 }
4891 
4892 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4893 {
4894 	struct l2cap_move_chan_req req;
4895 	u8 ident;
4896 
4897 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4898 
4899 	ident = l2cap_get_ident(chan->conn);
4900 	chan->ident = ident;
4901 
4902 	req.icid = cpu_to_le16(chan->scid);
4903 	req.dest_amp_id = dest_amp_id;
4904 
4905 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4906 		       &req);
4907 
4908 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4909 }
4910 
4911 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4912 {
4913 	struct l2cap_move_chan_rsp rsp;
4914 
4915 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4916 
4917 	rsp.icid = cpu_to_le16(chan->dcid);
4918 	rsp.result = cpu_to_le16(result);
4919 
4920 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4921 		       sizeof(rsp), &rsp);
4922 }
4923 
4924 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4925 {
4926 	struct l2cap_move_chan_cfm cfm;
4927 
4928 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4929 
4930 	chan->ident = l2cap_get_ident(chan->conn);
4931 
4932 	cfm.icid = cpu_to_le16(chan->scid);
4933 	cfm.result = cpu_to_le16(result);
4934 
4935 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4936 		       sizeof(cfm), &cfm);
4937 
4938 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4939 }
4940 
4941 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4942 {
4943 	struct l2cap_move_chan_cfm cfm;
4944 
4945 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4946 
4947 	cfm.icid = cpu_to_le16(icid);
4948 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4949 
4950 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4951 		       sizeof(cfm), &cfm);
4952 }
4953 
4954 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4955 					 u16 icid)
4956 {
4957 	struct l2cap_move_chan_cfm_rsp rsp;
4958 
4959 	BT_DBG("icid 0x%4.4x", icid);
4960 
4961 	rsp.icid = cpu_to_le16(icid);
4962 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4963 }
4964 
4965 static void __release_logical_link(struct l2cap_chan *chan)
4966 {
4967 	chan->hs_hchan = NULL;
4968 	chan->hs_hcon = NULL;
4969 
4970 	/* Placeholder - release the logical link */
4971 }
4972 
4973 static void l2cap_logical_fail(struct l2cap_chan *chan)
4974 {
4975 	/* Logical link setup failed */
4976 	if (chan->state != BT_CONNECTED) {
4977 		/* Create channel failure, disconnect */
4978 		l2cap_send_disconn_req(chan, ECONNRESET);
4979 		return;
4980 	}
4981 
4982 	switch (chan->move_role) {
4983 	case L2CAP_MOVE_ROLE_RESPONDER:
4984 		l2cap_move_done(chan);
4985 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4986 		break;
4987 	case L2CAP_MOVE_ROLE_INITIATOR:
4988 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4989 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4990 			/* Remote has only sent pending or
4991 			 * success responses, clean up
4992 			 */
4993 			l2cap_move_done(chan);
4994 		}
4995 
4996 		/* Other amp move states imply that the move
4997 		 * has already aborted
4998 		 */
4999 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5000 		break;
5001 	}
5002 }
5003 
5004 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5005 					struct hci_chan *hchan)
5006 {
5007 	struct l2cap_conf_rsp rsp;
5008 
5009 	chan->hs_hchan = hchan;
5010 	chan->hs_hcon->l2cap_data = chan->conn;
5011 
5012 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5013 
5014 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5015 		int err;
5016 
5017 		set_default_fcs(chan);
5018 
5019 		err = l2cap_ertm_init(chan);
5020 		if (err < 0)
5021 			l2cap_send_disconn_req(chan, -err);
5022 		else
5023 			l2cap_chan_ready(chan);
5024 	}
5025 }
5026 
5027 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5028 				      struct hci_chan *hchan)
5029 {
5030 	chan->hs_hcon = hchan->conn;
5031 	chan->hs_hcon->l2cap_data = chan->conn;
5032 
5033 	BT_DBG("move_state %d", chan->move_state);
5034 
5035 	switch (chan->move_state) {
5036 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5037 		/* Move confirm will be sent after a success
5038 		 * response is received
5039 		 */
5040 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5041 		break;
5042 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5043 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5044 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5045 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5046 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5047 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5048 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5049 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5050 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5051 		}
5052 		break;
5053 	default:
5054 		/* Move was not in expected state, free the channel */
5055 		__release_logical_link(chan);
5056 
5057 		chan->move_state = L2CAP_MOVE_STABLE;
5058 	}
5059 }
5060 
5061 /* Call with chan locked */
5062 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5063 		       u8 status)
5064 {
5065 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5066 
5067 	if (status) {
5068 		l2cap_logical_fail(chan);
5069 		__release_logical_link(chan);
5070 		return;
5071 	}
5072 
5073 	if (chan->state != BT_CONNECTED) {
5074 		/* Ignore logical link if channel is on BR/EDR */
5075 		if (chan->local_amp_id != AMP_ID_BREDR)
5076 			l2cap_logical_finish_create(chan, hchan);
5077 	} else {
5078 		l2cap_logical_finish_move(chan, hchan);
5079 	}
5080 }
5081 
5082 void l2cap_move_start(struct l2cap_chan *chan)
5083 {
5084 	BT_DBG("chan %p", chan);
5085 
5086 	if (chan->local_amp_id == AMP_ID_BREDR) {
5087 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5088 			return;
5089 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5090 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5091 		/* Placeholder - start physical link setup */
5092 	} else {
5093 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5094 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5095 		chan->move_id = 0;
5096 		l2cap_move_setup(chan);
5097 		l2cap_send_move_chan_req(chan, 0);
5098 	}
5099 }
5100 
5101 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5102 			    u8 local_amp_id, u8 remote_amp_id)
5103 {
5104 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5105 	       local_amp_id, remote_amp_id);
5106 
5107 	chan->fcs = L2CAP_FCS_NONE;
5108 
5109 	/* Outgoing channel on AMP */
5110 	if (chan->state == BT_CONNECT) {
5111 		if (result == L2CAP_CR_SUCCESS) {
5112 			chan->local_amp_id = local_amp_id;
5113 			l2cap_send_create_chan_req(chan, remote_amp_id);
5114 		} else {
5115 			/* Revert to BR/EDR connect */
5116 			l2cap_send_conn_req(chan);
5117 		}
5118 
5119 		return;
5120 	}
5121 
5122 	/* Incoming channel on AMP */
5123 	if (__l2cap_no_conn_pending(chan)) {
5124 		struct l2cap_conn_rsp rsp;
5125 		char buf[128];
5126 		rsp.scid = cpu_to_le16(chan->dcid);
5127 		rsp.dcid = cpu_to_le16(chan->scid);
5128 
5129 		if (result == L2CAP_CR_SUCCESS) {
5130 			/* Send successful response */
5131 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5132 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5133 		} else {
5134 			/* Send negative response */
5135 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5136 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5137 		}
5138 
5139 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5140 			       sizeof(rsp), &rsp);
5141 
5142 		if (result == L2CAP_CR_SUCCESS) {
5143 			l2cap_state_change(chan, BT_CONFIG);
5144 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5145 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5146 				       L2CAP_CONF_REQ,
5147 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5148 			chan->num_conf_req++;
5149 		}
5150 	}
5151 }
5152 
5153 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5154 				   u8 remote_amp_id)
5155 {
5156 	l2cap_move_setup(chan);
5157 	chan->move_id = local_amp_id;
5158 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5159 
5160 	l2cap_send_move_chan_req(chan, remote_amp_id);
5161 }
5162 
5163 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5164 {
5165 	struct hci_chan *hchan = NULL;
5166 
5167 	/* Placeholder - get hci_chan for logical link */
5168 
5169 	if (hchan) {
5170 		if (hchan->state == BT_CONNECTED) {
5171 			/* Logical link is ready to go */
5172 			chan->hs_hcon = hchan->conn;
5173 			chan->hs_hcon->l2cap_data = chan->conn;
5174 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5175 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5176 
5177 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5178 		} else {
5179 			/* Wait for logical link to be ready */
5180 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5181 		}
5182 	} else {
5183 		/* Logical link not available */
5184 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5185 	}
5186 }
5187 
5188 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5189 {
5190 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5191 		u8 rsp_result;
5192 		if (result == -EINVAL)
5193 			rsp_result = L2CAP_MR_BAD_ID;
5194 		else
5195 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5196 
5197 		l2cap_send_move_chan_rsp(chan, rsp_result);
5198 	}
5199 
5200 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5201 	chan->move_state = L2CAP_MOVE_STABLE;
5202 
5203 	/* Restart data transmission */
5204 	l2cap_ertm_send(chan);
5205 }
5206 
5207 /* Invoke with locked chan */
5208 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5209 {
5210 	u8 local_amp_id = chan->local_amp_id;
5211 	u8 remote_amp_id = chan->remote_amp_id;
5212 
5213 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5214 	       chan, result, local_amp_id, remote_amp_id);
5215 
5216 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5217 		return;
5218 
5219 	if (chan->state != BT_CONNECTED) {
5220 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5221 	} else if (result != L2CAP_MR_SUCCESS) {
5222 		l2cap_do_move_cancel(chan, result);
5223 	} else {
5224 		switch (chan->move_role) {
5225 		case L2CAP_MOVE_ROLE_INITIATOR:
5226 			l2cap_do_move_initiate(chan, local_amp_id,
5227 					       remote_amp_id);
5228 			break;
5229 		case L2CAP_MOVE_ROLE_RESPONDER:
5230 			l2cap_do_move_respond(chan, result);
5231 			break;
5232 		default:
5233 			l2cap_do_move_cancel(chan, result);
5234 			break;
5235 		}
5236 	}
5237 }
5238 
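/* Handle a Move Channel Request: validate the destination controller,
 * detect move collisions (the side with the larger bd_addr wins) and
 * respond with a success, pending or error result.
 */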
5239 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5240 					 struct l2cap_cmd_hdr *cmd,
5241 					 u16 cmd_len, void *data)
5242 {
5243 	struct l2cap_move_chan_req *req = data;
5244 	struct l2cap_move_chan_rsp rsp;
5245 	struct l2cap_chan *chan;
5246 	u16 icid = 0;
5247 	u16 result = L2CAP_MR_NOT_ALLOWED;
5248 
5249 	if (cmd_len != sizeof(*req))
5250 		return -EPROTO;
5251 
5252 	icid = le16_to_cpu(req->icid);
5253 
5254 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5255 
5256 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5257 		return -EINVAL;
5258 
5259 	chan = l2cap_get_chan_by_dcid(conn, icid);
5260 	if (!chan) {
5261 		rsp.icid = cpu_to_le16(icid);
5262 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5263 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5264 			       sizeof(rsp), &rsp);
5265 		return 0;
5266 	}
5267 
5268 	chan->ident = cmd->ident;
5269 
5270 	if (chan->scid < L2CAP_CID_DYN_START ||
5271 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5272 	    (chan->mode != L2CAP_MODE_ERTM &&
5273 	     chan->mode != L2CAP_MODE_STREAMING)) {
5274 		result = L2CAP_MR_NOT_ALLOWED;
5275 		goto send_move_response;
5276 	}
5277 
5278 	if (chan->local_amp_id == req->dest_amp_id) {
5279 		result = L2CAP_MR_SAME_ID;
5280 		goto send_move_response;
5281 	}
5282 
5283 	if (req->dest_amp_id != AMP_ID_BREDR) {
5284 		struct hci_dev *hdev;
5285 		hdev = hci_dev_get(req->dest_amp_id);
5286 		if (!hdev || hdev->dev_type != HCI_AMP ||
5287 		    !test_bit(HCI_UP, &hdev->flags)) {
5288 			if (hdev)
5289 				hci_dev_put(hdev);
5290 
5291 			result = L2CAP_MR_BAD_ID;
5292 			goto send_move_response;
5293 		}
5294 		hci_dev_put(hdev);
5295 	}
5296 
5297 	/* Detect a move collision.  Only send a collision response
5298 	 * if this side has "lost", otherwise proceed with the move.
5299 	 * The winner has the larger bd_addr.
5300 	 */
5301 	if ((__chan_is_moving(chan) ||
5302 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5303 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5304 		result = L2CAP_MR_COLLISION;
5305 		goto send_move_response;
5306 	}
5307 
5308 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5309 	l2cap_move_setup(chan);
5310 	chan->move_id = req->dest_amp_id;
5311 
5312 	if (req->dest_amp_id == AMP_ID_BREDR) {
5313 		/* Moving to BR/EDR */
5314 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5315 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5316 			result = L2CAP_MR_PEND;
5317 		} else {
5318 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5319 			result = L2CAP_MR_SUCCESS;
5320 		}
5321 	} else {
5322 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5323 		/* Placeholder - uncomment when amp functions are available */
5324 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5325 		result = L2CAP_MR_PEND;
5326 	}
5327 
5328 send_move_response:
5329 	l2cap_send_move_chan_rsp(chan, result);
5330 
5331 	l2cap_chan_unlock(chan);
5332 	l2cap_chan_put(chan);
5333 
5334 	return 0;
5335 }
5336 
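/* Continue an in-progress channel move after a successful or pending
 * Move Channel Response, advancing the move state machine and sending
 * a Move Channel Confirmation once the logical link is ready.
 */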
5337 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5338 {
5339 	struct l2cap_chan *chan;
5340 	struct hci_chan *hchan = NULL;
5341 
5342 	chan = l2cap_get_chan_by_scid(conn, icid);
5343 	if (!chan) {
5344 		l2cap_send_move_chan_cfm_icid(conn, icid);
5345 		return;
5346 	}
5347 
5348 	__clear_chan_timer(chan);
5349 	if (result == L2CAP_MR_PEND)
5350 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5351 
5352 	switch (chan->move_state) {
5353 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5354 		/* Move confirm will be sent when logical link
5355 		 * is complete.
5356 		 */
5357 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5358 		break;
5359 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5360 		if (result == L2CAP_MR_PEND) {
5361 			break;
5362 		} else if (test_bit(CONN_LOCAL_BUSY,
5363 				    &chan->conn_state)) {
5364 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5365 		} else {
5366 			/* Logical link is up or moving to BR/EDR,
5367 			 * proceed with move
5368 			 */
5369 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5370 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5371 		}
5372 		break;
5373 	case L2CAP_MOVE_WAIT_RSP:
5374 		/* Moving to AMP */
5375 		if (result == L2CAP_MR_SUCCESS) {
5376 			/* Remote is ready, send confirm immediately
5377 			 * after logical link is ready
5378 			 */
5379 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5380 		} else {
5381 			/* Both logical link and move success
5382 			 * are required to confirm
5383 			 */
5384 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5385 		}
5386 
5387 		/* Placeholder - get hci_chan for logical link */
5388 		if (!hchan) {
5389 			/* Logical link not available */
5390 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5391 			break;
5392 		}
5393 
5394 		/* If the logical link is not yet connected, do not
5395 		 * send confirmation.
5396 		 */
5397 		if (hchan->state != BT_CONNECTED)
5398 			break;
5399 
5400 		/* Logical link is already ready to go */
5401 
5402 		chan->hs_hcon = hchan->conn;
5403 		chan->hs_hcon->l2cap_data = chan->conn;
5404 
5405 		if (result == L2CAP_MR_SUCCESS) {
5406 			/* Can confirm now */
5407 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5408 		} else {
5409 			/* Now only need move success
5410 			 * to confirm
5411 			 */
5412 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5413 		}
5414 
5415 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5416 		break;
5417 	default:
5418 		/* Any other amp move state means the move failed. */
5419 		chan->move_id = chan->local_amp_id;
5420 		l2cap_move_done(chan);
5421 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5422 	}
5423 
5424 	l2cap_chan_unlock(chan);
5425 	l2cap_chan_put(chan);
5426 }
5427 
5428 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5429 			    u16 result)
5430 {
5431 	struct l2cap_chan *chan;
5432 
5433 	chan = l2cap_get_chan_by_ident(conn, ident);
5434 	if (!chan) {
5435 		/* Could not locate channel, icid is best guess */
5436 		l2cap_send_move_chan_cfm_icid(conn, icid);
5437 		return;
5438 	}
5439 
5440 	__clear_chan_timer(chan);
5441 
5442 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5443 		if (result == L2CAP_MR_COLLISION) {
5444 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5445 		} else {
5446 			/* Cleanup - cancel move */
5447 			chan->move_id = chan->local_amp_id;
5448 			l2cap_move_done(chan);
5449 		}
5450 	}
5451 
5452 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5453 
5454 	l2cap_chan_unlock(chan);
5455 	l2cap_chan_put(chan);
5456 }
5457 
5458 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5459 				  struct l2cap_cmd_hdr *cmd,
5460 				  u16 cmd_len, void *data)
5461 {
5462 	struct l2cap_move_chan_rsp *rsp = data;
5463 	u16 icid, result;
5464 
5465 	if (cmd_len != sizeof(*rsp))
5466 		return -EPROTO;
5467 
5468 	icid = le16_to_cpu(rsp->icid);
5469 	result = le16_to_cpu(rsp->result);
5470 
5471 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5472 
5473 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5474 		l2cap_move_continue(conn, icid, result);
5475 	else
5476 		l2cap_move_fail(conn, cmd->ident, icid, result);
5477 
5478 	return 0;
5479 }
5480 
5481 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5482 				      struct l2cap_cmd_hdr *cmd,
5483 				      u16 cmd_len, void *data)
5484 {
5485 	struct l2cap_move_chan_cfm *cfm = data;
5486 	struct l2cap_chan *chan;
5487 	u16 icid, result;
5488 
5489 	if (cmd_len != sizeof(*cfm))
5490 		return -EPROTO;
5491 
5492 	icid = le16_to_cpu(cfm->icid);
5493 	result = le16_to_cpu(cfm->result);
5494 
5495 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5496 
5497 	chan = l2cap_get_chan_by_dcid(conn, icid);
5498 	if (!chan) {
5499 		/* Spec requires a response even if the icid was not found */
5500 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5501 		return 0;
5502 	}
5503 
5504 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5505 		if (result == L2CAP_MC_CONFIRMED) {
5506 			chan->local_amp_id = chan->move_id;
5507 			if (chan->local_amp_id == AMP_ID_BREDR)
5508 				__release_logical_link(chan);
5509 		} else {
5510 			chan->move_id = chan->local_amp_id;
5511 		}
5512 
5513 		l2cap_move_done(chan);
5514 	}
5515 
5516 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5517 
5518 	l2cap_chan_unlock(chan);
5519 	l2cap_chan_put(chan);
5520 
5521 	return 0;
5522 }
5523 
5524 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5525 						 struct l2cap_cmd_hdr *cmd,
5526 						 u16 cmd_len, void *data)
5527 {
5528 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5529 	struct l2cap_chan *chan;
5530 	u16 icid;
5531 
5532 	if (cmd_len != sizeof(*rsp))
5533 		return -EPROTO;
5534 
5535 	icid = le16_to_cpu(rsp->icid);
5536 
5537 	BT_DBG("icid 0x%4.4x", icid);
5538 
5539 	chan = l2cap_get_chan_by_scid(conn, icid);
5540 	if (!chan)
5541 		return 0;
5542 
5543 	__clear_chan_timer(chan);
5544 
5545 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5546 		chan->local_amp_id = chan->move_id;
5547 
5548 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5549 			__release_logical_link(chan);
5550 
5551 		l2cap_move_done(chan);
5552 	}
5553 
5554 	l2cap_chan_unlock(chan);
5555 	l2cap_chan_put(chan);
5556 
5557 	return 0;
5558 }
5559 
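/* Handle an LE Connection Parameter Update Request.  Only the central
 * (HCI_ROLE_MASTER) may be asked to update parameters; the request is
 * validated with hci_check_conn_params() and, when acceptable, applied
 * via hci_le_conn_update() and reported to the management interface.
 */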
5560 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5561 					      struct l2cap_cmd_hdr *cmd,
5562 					      u16 cmd_len, u8 *data)
5563 {
5564 	struct hci_conn *hcon = conn->hcon;
5565 	struct l2cap_conn_param_update_req *req;
5566 	struct l2cap_conn_param_update_rsp rsp;
5567 	u16 min, max, latency, to_multiplier;
5568 	int err;
5569 
5570 	if (hcon->role != HCI_ROLE_MASTER)
5571 		return -EINVAL;
5572 
5573 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5574 		return -EPROTO;
5575 
5576 	req = (struct l2cap_conn_param_update_req *) data;
5577 	min		= __le16_to_cpu(req->min);
5578 	max		= __le16_to_cpu(req->max);
5579 	latency		= __le16_to_cpu(req->latency);
5580 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5581 
5582 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5583 	       min, max, latency, to_multiplier);
5584 
5585 	memset(&rsp, 0, sizeof(rsp));
5586 
5587 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5588 	if (err)
5589 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5590 	else
5591 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5592 
5593 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5594 		       sizeof(rsp), &rsp);
5595 
5596 	if (!err) {
5597 		u8 store_hint;
5598 
5599 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5600 						to_multiplier);
5601 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5602 				    store_hint, min, max, latency,
5603 				    to_multiplier);
5604 
5605 	}
5606 
5607 	return 0;
5608 }
5609 
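/* Handle an LE Credit Based Connection Response.  On success the
 * channel that sent the request (matched by ident) stores the peer's
 * DCID, MTU, MPS and initial TX credits and is marked ready.  If the
 * peer asks for more authentication or encryption, the channel's
 * security level is raised and SMP re-secures the link so that a new
 * connect request can be sent afterwards.
 */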
5610 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5611 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5612 				u8 *data)
5613 {
5614 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5615 	struct hci_conn *hcon = conn->hcon;
5616 	u16 dcid, mtu, mps, credits, result;
5617 	struct l2cap_chan *chan;
5618 	int err, sec_level;
5619 
5620 	if (cmd_len < sizeof(*rsp))
5621 		return -EPROTO;
5622 
5623 	dcid    = __le16_to_cpu(rsp->dcid);
5624 	mtu     = __le16_to_cpu(rsp->mtu);
5625 	mps     = __le16_to_cpu(rsp->mps);
5626 	credits = __le16_to_cpu(rsp->credits);
5627 	result  = __le16_to_cpu(rsp->result);
5628 
5629 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5630 					   dcid < L2CAP_CID_DYN_START ||
5631 					   dcid > L2CAP_CID_LE_DYN_END))
5632 		return -EPROTO;
5633 
5634 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5635 	       dcid, mtu, mps, credits, result);
5636 
5637 	mutex_lock(&conn->chan_lock);
5638 
5639 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5640 	if (!chan) {
5641 		err = -EBADSLT;
5642 		goto unlock;
5643 	}
5644 
5645 	err = 0;
5646 
5647 	l2cap_chan_lock(chan);
5648 
5649 	switch (result) {
5650 	case L2CAP_CR_LE_SUCCESS:
5651 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5652 			err = -EBADSLT;
5653 			break;
5654 		}
5655 
5656 		chan->ident = 0;
5657 		chan->dcid = dcid;
5658 		chan->omtu = mtu;
5659 		chan->remote_mps = mps;
5660 		chan->tx_credits = credits;
5661 		l2cap_chan_ready(chan);
5662 		break;
5663 
5664 	case L2CAP_CR_LE_AUTHENTICATION:
5665 	case L2CAP_CR_LE_ENCRYPTION:
5666 		/* If we already have MITM protection we can't do
5667 		 * anything.
5668 		 */
5669 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5670 			l2cap_chan_del(chan, ECONNREFUSED);
5671 			break;
5672 		}
5673 
5674 		sec_level = hcon->sec_level + 1;
5675 		if (chan->sec_level < sec_level)
5676 			chan->sec_level = sec_level;
5677 
5678 		/* We'll need to send a new Connect Request */
5679 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5680 
5681 		smp_conn_security(hcon, chan->sec_level);
5682 		break;
5683 
5684 	default:
5685 		l2cap_chan_del(chan, ECONNREFUSED);
5686 		break;
5687 	}
5688 
5689 	l2cap_chan_unlock(chan);
5690 
5691 unlock:
5692 	mutex_unlock(&conn->chan_lock);
5693 
5694 	return err;
5695 }
5696 
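/* Dispatch a single BR/EDR signaling command to its handler.  A
 * non-zero return value makes the caller answer with an
 * L2CAP_COMMAND_REJ ("command not understood").
 */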
5697 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5698 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5699 				      u8 *data)
5700 {
5701 	int err = 0;
5702 
5703 	switch (cmd->code) {
5704 	case L2CAP_COMMAND_REJ:
5705 		l2cap_command_rej(conn, cmd, cmd_len, data);
5706 		break;
5707 
5708 	case L2CAP_CONN_REQ:
5709 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5710 		break;
5711 
5712 	case L2CAP_CONN_RSP:
5713 	case L2CAP_CREATE_CHAN_RSP:
5714 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5715 		break;
5716 
5717 	case L2CAP_CONF_REQ:
5718 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5719 		break;
5720 
5721 	case L2CAP_CONF_RSP:
5722 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5723 		break;
5724 
5725 	case L2CAP_DISCONN_REQ:
5726 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5727 		break;
5728 
5729 	case L2CAP_DISCONN_RSP:
5730 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5731 		break;
5732 
5733 	case L2CAP_ECHO_REQ:
5734 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5735 		break;
5736 
5737 	case L2CAP_ECHO_RSP:
5738 		break;
5739 
5740 	case L2CAP_INFO_REQ:
5741 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5742 		break;
5743 
5744 	case L2CAP_INFO_RSP:
5745 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5746 		break;
5747 
5748 	case L2CAP_CREATE_CHAN_REQ:
5749 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5750 		break;
5751 
5752 	case L2CAP_MOVE_CHAN_REQ:
5753 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_MOVE_CHAN_RSP:
5757 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5758 		break;
5759 
5760 	case L2CAP_MOVE_CHAN_CFM:
5761 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5762 		break;
5763 
5764 	case L2CAP_MOVE_CHAN_CFM_RSP:
5765 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5766 		break;
5767 
5768 	default:
5769 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5770 		err = -EINVAL;
5771 		break;
5772 	}
5773 
5774 	return err;
5775 }
5776 
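/* Handle an LE Credit Based Connection Request: find a channel
 * listening on the SPSM, check security and the requested SCID, then
 * create and register the new channel.  With FLAG_DEFER_SETUP the
 * response is deferred to the socket layer and L2CAP_CR_PEND is used
 * internally as a "no response yet" marker.
 */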
5777 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5778 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5779 				u8 *data)
5780 {
5781 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5782 	struct l2cap_le_conn_rsp rsp;
5783 	struct l2cap_chan *chan, *pchan;
5784 	u16 dcid, scid, credits, mtu, mps;
5785 	__le16 psm;
5786 	u8 result;
5787 
5788 	if (cmd_len != sizeof(*req))
5789 		return -EPROTO;
5790 
5791 	scid = __le16_to_cpu(req->scid);
5792 	mtu  = __le16_to_cpu(req->mtu);
5793 	mps  = __le16_to_cpu(req->mps);
5794 	psm  = req->psm;
5795 	dcid = 0;
5796 	credits = 0;
5797 
5798 	if (mtu < 23 || mps < 23)
5799 		return -EPROTO;
5800 
5801 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5802 	       scid, mtu, mps);
5803 
5804 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5805 	 * page 1059:
5806 	 *
5807 	 * Valid range: 0x0001-0x00ff
5808 	 *
5809 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5810 	 */
5811 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5812 		result = L2CAP_CR_LE_BAD_PSM;
5813 		chan = NULL;
5814 		goto response;
5815 	}
5816 
5817 	/* Check if we have socket listening on psm */
5818 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5819 					 &conn->hcon->dst, LE_LINK);
5820 	if (!pchan) {
5821 		result = L2CAP_CR_LE_BAD_PSM;
5822 		chan = NULL;
5823 		goto response;
5824 	}
5825 
5826 	mutex_lock(&conn->chan_lock);
5827 	l2cap_chan_lock(pchan);
5828 
5829 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5830 				     SMP_ALLOW_STK)) {
5831 		result = L2CAP_CR_LE_AUTHENTICATION;
5832 		chan = NULL;
5833 		goto response_unlock;
5834 	}
5835 
5836 	/* Check for valid dynamic CID range */
5837 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5838 		result = L2CAP_CR_LE_INVALID_SCID;
5839 		chan = NULL;
5840 		goto response_unlock;
5841 	}
5842 
5843 	/* Check if we already have channel with that dcid */
5844 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5845 		result = L2CAP_CR_LE_SCID_IN_USE;
5846 		chan = NULL;
5847 		goto response_unlock;
5848 	}
5849 
5850 	chan = pchan->ops->new_connection(pchan);
5851 	if (!chan) {
5852 		result = L2CAP_CR_LE_NO_MEM;
5853 		goto response_unlock;
5854 	}
5855 
5856 	bacpy(&chan->src, &conn->hcon->src);
5857 	bacpy(&chan->dst, &conn->hcon->dst);
5858 	chan->src_type = bdaddr_src_type(conn->hcon);
5859 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5860 	chan->psm  = psm;
5861 	chan->dcid = scid;
5862 	chan->omtu = mtu;
5863 	chan->remote_mps = mps;
5864 
5865 	__l2cap_chan_add(conn, chan);
5866 
5867 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5868 
5869 	dcid = chan->scid;
5870 	credits = chan->rx_credits;
5871 
5872 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5873 
5874 	chan->ident = cmd->ident;
5875 
5876 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5877 		l2cap_state_change(chan, BT_CONNECT2);
5878 		/* The following result value is actually not defined
5879 		 * for LE CoC but we use it to let the function know
5880 		 * that it should bail out after doing its cleanup
5881 		 * instead of sending a response.
5882 		 */
5883 		result = L2CAP_CR_PEND;
5884 		chan->ops->defer(chan);
5885 	} else {
5886 		l2cap_chan_ready(chan);
5887 		result = L2CAP_CR_LE_SUCCESS;
5888 	}
5889 
5890 response_unlock:
5891 	l2cap_chan_unlock(pchan);
5892 	mutex_unlock(&conn->chan_lock);
5893 	l2cap_chan_put(pchan);
5894 
5895 	if (result == L2CAP_CR_PEND)
5896 		return 0;
5897 
5898 response:
5899 	if (chan) {
5900 		rsp.mtu = cpu_to_le16(chan->imtu);
5901 		rsp.mps = cpu_to_le16(chan->mps);
5902 	} else {
5903 		rsp.mtu = 0;
5904 		rsp.mps = 0;
5905 	}
5906 
5907 	rsp.dcid    = cpu_to_le16(dcid);
5908 	rsp.credits = cpu_to_le16(credits);
5909 	rsp.result  = cpu_to_le16(result);
5910 
5911 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5912 
5913 	return 0;
5914 }
5915 
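/* Handle an LE Flow Control Credit packet: the peer grants additional
 * TX credits for a connection-oriented channel.  The running total
 * must not exceed LE_FLOWCTL_MAX_CREDITS; an overflow is treated as a
 * protocol violation and the channel is disconnected.
 */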
5916 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5917 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5918 				   u8 *data)
5919 {
5920 	struct l2cap_le_credits *pkt;
5921 	struct l2cap_chan *chan;
5922 	u16 cid, credits, max_credits;
5923 
5924 	if (cmd_len != sizeof(*pkt))
5925 		return -EPROTO;
5926 
5927 	pkt = (struct l2cap_le_credits *) data;
5928 	cid	= __le16_to_cpu(pkt->cid);
5929 	credits	= __le16_to_cpu(pkt->credits);
5930 
5931 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5932 
5933 	chan = l2cap_get_chan_by_dcid(conn, cid);
5934 	if (!chan)
5935 		return -EBADSLT;
5936 
5937 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5938 	if (credits > max_credits) {
5939 		BT_ERR("LE credits overflow");
5940 		l2cap_send_disconn_req(chan, ECONNRESET);
5941 
5942 		/* Return 0 so that we don't trigger an unnecessary
5943 		 * command reject packet.
5944 		 */
5945 		goto unlock;
5946 	}
5947 
5948 	chan->tx_credits += credits;
5949 
5950 	/* Resume sending */
5951 	l2cap_le_flowctl_send(chan);
5952 
5953 	if (chan->tx_credits)
5954 		chan->ops->resume(chan);
5955 
5956 unlock:
5957 	l2cap_chan_unlock(chan);
5958 	l2cap_chan_put(chan);
5959 
5960 	return 0;
5961 }
5962 
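/* Handle an Enhanced Credit Based Connection Request, which may ask
 * for several channels at once (the response PDU below has room for
 * up to five DCIDs) sharing a single MTU/MPS/credits configuration.
 * Each requested SCID is validated individually, so one request can
 * yield a mix of accepted and refused channels.
 */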
5963 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5964 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5965 				       u8 *data)
5966 {
5967 	struct l2cap_ecred_conn_req *req = (void *) data;
5968 	struct {
5969 		struct l2cap_ecred_conn_rsp rsp;
5970 		__le16 dcid[5];
5971 	} __packed pdu;
5972 	struct l2cap_chan *chan, *pchan;
5973 	u16 mtu, mps;
5974 	__le16 psm;
5975 	u8 result, len = 0;
5976 	int i, num_scid;
5977 	bool defer = false;
5978 
5979 	if (!enable_ecred)
5980 		return -EINVAL;
5981 
5982 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5983 		result = L2CAP_CR_LE_INVALID_PARAMS;
5984 		goto response;
5985 	}
5986 
5987 	mtu  = __le16_to_cpu(req->mtu);
5988 	mps  = __le16_to_cpu(req->mps);
5989 
5990 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5991 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5992 		goto response;
5993 	}
5994 
5995 	psm  = req->psm;
5996 
5997 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5998 	 * page 1059:
5999 	 *
6000 	 * Valid range: 0x0001-0x00ff
6001 	 *
6002 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6003 	 */
6004 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6005 		result = L2CAP_CR_LE_BAD_PSM;
6006 		goto response;
6007 	}
6008 
6009 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6010 
6011 	memset(&pdu, 0, sizeof(pdu));
6012 
6013 	/* Check if we have socket listening on psm */
6014 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6015 					 &conn->hcon->dst, LE_LINK);
6016 	if (!pchan) {
6017 		result = L2CAP_CR_LE_BAD_PSM;
6018 		goto response;
6019 	}
6020 
6021 	mutex_lock(&conn->chan_lock);
6022 	l2cap_chan_lock(pchan);
6023 
6024 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6025 				     SMP_ALLOW_STK)) {
6026 		result = L2CAP_CR_LE_AUTHENTICATION;
6027 		goto unlock;
6028 	}
6029 
6030 	result = L2CAP_CR_LE_SUCCESS;
6031 	cmd_len -= sizeof(*req);
6032 	num_scid = cmd_len / sizeof(u16);
6033 
6034 	for (i = 0; i < num_scid; i++) {
6035 		u16 scid = __le16_to_cpu(req->scid[i]);
6036 
6037 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6038 
6039 		pdu.dcid[i] = 0x0000;
6040 		len += sizeof(*pdu.dcid);
6041 
6042 		/* Check for valid dynamic CID range */
6043 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6044 			result = L2CAP_CR_LE_INVALID_SCID;
6045 			continue;
6046 		}
6047 
6048 		/* Check if we already have channel with that dcid */
6049 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6050 			result = L2CAP_CR_LE_SCID_IN_USE;
6051 			continue;
6052 		}
6053 
6054 		chan = pchan->ops->new_connection(pchan);
6055 		if (!chan) {
6056 			result = L2CAP_CR_LE_NO_MEM;
6057 			continue;
6058 		}
6059 
6060 		bacpy(&chan->src, &conn->hcon->src);
6061 		bacpy(&chan->dst, &conn->hcon->dst);
6062 		chan->src_type = bdaddr_src_type(conn->hcon);
6063 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6064 		chan->psm  = psm;
6065 		chan->dcid = scid;
6066 		chan->omtu = mtu;
6067 		chan->remote_mps = mps;
6068 
6069 		__l2cap_chan_add(conn, chan);
6070 
6071 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6072 
6073 		/* Init response */
6074 		if (!pdu.rsp.credits) {
6075 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6076 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6077 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6078 		}
6079 
6080 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6081 
6082 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6083 
6084 		chan->ident = cmd->ident;
6085 
6086 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6087 			l2cap_state_change(chan, BT_CONNECT2);
6088 			defer = true;
6089 			chan->ops->defer(chan);
6090 		} else {
6091 			l2cap_chan_ready(chan);
6092 		}
6093 	}
6094 
6095 unlock:
6096 	l2cap_chan_unlock(pchan);
6097 	mutex_unlock(&conn->chan_lock);
6098 	l2cap_chan_put(pchan);
6099 
6100 response:
6101 	pdu.rsp.result = cpu_to_le16(result);
6102 
6103 	if (defer)
6104 		return 0;
6105 
6106 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6107 		       sizeof(pdu.rsp) + len, &pdu);
6108 
6109 	return 0;
6110 }
6111 
6112 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6113 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6114 				       u8 *data)
6115 {
6116 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6117 	struct hci_conn *hcon = conn->hcon;
6118 	u16 mtu, mps, credits, result;
6119 	struct l2cap_chan *chan, *tmp;
6120 	int err = 0, sec_level;
6121 	int i = 0;
6122 
6123 	if (cmd_len < sizeof(*rsp))
6124 		return -EPROTO;
6125 
6126 	mtu     = __le16_to_cpu(rsp->mtu);
6127 	mps     = __le16_to_cpu(rsp->mps);
6128 	credits = __le16_to_cpu(rsp->credits);
6129 	result  = __le16_to_cpu(rsp->result);
6130 
6131 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6132 	       result);
6133 
6134 	mutex_lock(&conn->chan_lock);
6135 
6136 	cmd_len -= sizeof(*rsp);
6137 
6138 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6139 		u16 dcid;
6140 
6141 		if (chan->ident != cmd->ident ||
6142 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6143 		    chan->state == BT_CONNECTED)
6144 			continue;
6145 
6146 		l2cap_chan_lock(chan);
6147 
6148 		/* Check that there is a dcid for each pending channel */
6149 		if (cmd_len < sizeof(dcid)) {
6150 			l2cap_chan_del(chan, ECONNREFUSED);
6151 			l2cap_chan_unlock(chan);
6152 			continue;
6153 		}
6154 
6155 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6156 		cmd_len -= sizeof(u16);
6157 
6158 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6159 
6160 		/* Check if dcid is already in use */
6161 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6162 			/* If a device receives a
6163 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6164 			 * already-assigned Destination CID, then both the
6165 			 * original channel and the new channel shall be
6166 			 * immediately discarded and not used.
6167 			 */
6168 			l2cap_chan_del(chan, ECONNREFUSED);
6169 			l2cap_chan_unlock(chan);
6170 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6171 			l2cap_chan_lock(chan);
6172 			l2cap_chan_del(chan, ECONNRESET);
6173 			l2cap_chan_unlock(chan);
6174 			continue;
6175 		}
6176 
6177 		switch (result) {
6178 		case L2CAP_CR_LE_AUTHENTICATION:
6179 		case L2CAP_CR_LE_ENCRYPTION:
6180 			/* If we already have MITM protection we can't do
6181 			 * anything.
6182 			 */
6183 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6184 				l2cap_chan_del(chan, ECONNREFUSED);
6185 				break;
6186 			}
6187 
6188 			sec_level = hcon->sec_level + 1;
6189 			if (chan->sec_level < sec_level)
6190 				chan->sec_level = sec_level;
6191 
6192 			/* We'll need to send a new Connect Request */
6193 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6194 
6195 			smp_conn_security(hcon, chan->sec_level);
6196 			break;
6197 
6198 		case L2CAP_CR_LE_BAD_PSM:
6199 			l2cap_chan_del(chan, ECONNREFUSED);
6200 			break;
6201 
6202 		default:
6203 			/* If dcid was not set it means the channel was refused */
6204 			if (!dcid) {
6205 				l2cap_chan_del(chan, ECONNREFUSED);
6206 				break;
6207 			}
6208 
6209 			chan->ident = 0;
6210 			chan->dcid = dcid;
6211 			chan->omtu = mtu;
6212 			chan->remote_mps = mps;
6213 			chan->tx_credits = credits;
6214 			l2cap_chan_ready(chan);
6215 			break;
6216 		}
6217 
6218 		l2cap_chan_unlock(chan);
6219 	}
6220 
6221 	mutex_unlock(&conn->chan_lock);
6222 
6223 	return err;
6224 }
6225 
6226 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6227 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6228 					 u8 *data)
6229 {
6230 	struct l2cap_ecred_reconf_req *req = (void *) data;
6231 	struct l2cap_ecred_reconf_rsp rsp;
6232 	u16 mtu, mps, result;
6233 	struct l2cap_chan *chan;
6234 	int i, num_scid;
6235 
6236 	if (!enable_ecred)
6237 		return -EINVAL;
6238 
6239 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6240 		result = L2CAP_CR_LE_INVALID_PARAMS;
6241 		goto respond;
6242 	}
6243 
6244 	mtu = __le16_to_cpu(req->mtu);
6245 	mps = __le16_to_cpu(req->mps);
6246 
6247 	BT_DBG("mtu %u mps %u", mtu, mps);
6248 
6249 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6250 		result = L2CAP_RECONF_INVALID_MTU;
6251 		goto respond;
6252 	}
6253 
6254 	if (mps < L2CAP_ECRED_MIN_MPS) {
6255 		result = L2CAP_RECONF_INVALID_MPS;
6256 		goto respond;
6257 	}
6258 
6259 	cmd_len -= sizeof(*req);
6260 	num_scid = cmd_len / sizeof(u16);
6261 	result = L2CAP_RECONF_SUCCESS;
6262 
6263 	for (i = 0; i < num_scid; i++) {
6264 		u16 scid;
6265 
6266 		scid = __le16_to_cpu(req->scid[i]);
6267 		if (!scid)
6268 			return -EPROTO;
6269 
6270 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6271 		if (!chan)
6272 			continue;
6273 
6274 		/* If the MTU value is decreased for any of the included
6275 		 * channels, then the receiver shall disconnect all
6276 		 * included channels.
6277 		 */
6278 		if (chan->omtu > mtu) {
6279 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6280 			       chan->omtu, mtu);
6281 			result = L2CAP_RECONF_INVALID_MTU;
6282 		}
6283 
6284 		chan->omtu = mtu;
6285 		chan->remote_mps = mps;
6286 	}
6287 
6288 respond:
6289 	rsp.result = cpu_to_le16(result);
6290 
6291 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6292 		       &rsp);
6293 
6294 	return 0;
6295 }
6296 
6297 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6298 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6299 					 u8 *data)
6300 {
6301 	struct l2cap_chan *chan, *tmp;
6302 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6303 	u16 result;
6304 
6305 	if (cmd_len < sizeof(*rsp))
6306 		return -EPROTO;
6307 
6308 	result = __le16_to_cpu(rsp->result);
6309 
6310 	BT_DBG("result 0x%4.4x", result);
6311 
6312 	if (!result)
6313 		return 0;
6314 
6315 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6316 		if (chan->ident != cmd->ident)
6317 			continue;
6318 
6319 		l2cap_chan_del(chan, ECONNRESET);
6320 	}
6321 
6322 	return 0;
6323 }
6324 
6325 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6326 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6327 				       u8 *data)
6328 {
6329 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6330 	struct l2cap_chan *chan;
6331 
6332 	if (cmd_len < sizeof(*rej))
6333 		return -EPROTO;
6334 
6335 	mutex_lock(&conn->chan_lock);
6336 
6337 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6338 	if (!chan)
6339 		goto done;
6340 
6341 	l2cap_chan_lock(chan);
6342 	l2cap_chan_del(chan, ECONNREFUSED);
6343 	l2cap_chan_unlock(chan);
6344 
6345 done:
6346 	mutex_unlock(&conn->chan_lock);
6347 	return 0;
6348 }
6349 
6350 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6351 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6352 				   u8 *data)
6353 {
6354 	int err = 0;
6355 
6356 	switch (cmd->code) {
6357 	case L2CAP_COMMAND_REJ:
6358 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6359 		break;
6360 
6361 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6362 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6363 		break;
6364 
6365 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6366 		break;
6367 
6368 	case L2CAP_LE_CONN_RSP:
6369 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6370 		break;
6371 
6372 	case L2CAP_LE_CONN_REQ:
6373 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6374 		break;
6375 
6376 	case L2CAP_LE_CREDITS:
6377 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6378 		break;
6379 
6380 	case L2CAP_ECRED_CONN_REQ:
6381 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6382 		break;
6383 
6384 	case L2CAP_ECRED_CONN_RSP:
6385 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6386 		break;
6387 
6388 	case L2CAP_ECRED_RECONF_REQ:
6389 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6390 		break;
6391 
6392 	case L2CAP_ECRED_RECONF_RSP:
6393 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6394 		break;
6395 
6396 	case L2CAP_DISCONN_REQ:
6397 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6398 		break;
6399 
6400 	case L2CAP_DISCONN_RSP:
6401 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6402 		break;
6403 
6404 	default:
6405 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6406 		err = -EINVAL;
6407 		break;
6408 	}
6409 
6410 	return err;
6411 }
6412 
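/* LE signaling: each frame on the LE signaling channel carries exactly
 * one command, so the length in the command header must match the
 * remaining payload.  Malformed frames are dropped; handler errors are
 * answered with a "command not understood" reject.
 */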
6413 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6414 					struct sk_buff *skb)
6415 {
6416 	struct hci_conn *hcon = conn->hcon;
6417 	struct l2cap_cmd_hdr *cmd;
6418 	u16 len;
6419 	int err;
6420 
6421 	if (hcon->type != LE_LINK)
6422 		goto drop;
6423 
6424 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6425 		goto drop;
6426 
6427 	cmd = (void *) skb->data;
6428 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6429 
6430 	len = le16_to_cpu(cmd->len);
6431 
6432 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6433 
6434 	if (len != skb->len || !cmd->ident) {
6435 		BT_DBG("corrupted command");
6436 		goto drop;
6437 	}
6438 
6439 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6440 	if (err) {
6441 		struct l2cap_cmd_rej_unk rej;
6442 
6443 		BT_ERR("Wrong link type (%d)", err);
6444 
6445 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6446 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6447 			       sizeof(rej), &rej);
6448 	}
6449 
6450 drop:
6451 	kfree_skb(skb);
6452 }
6453 
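/* BR/EDR signaling: one frame may carry several commands back to back,
 * so keep parsing while at least a full command header remains.  The
 * frame is handed to l2cap_raw_recv() before the individual commands
 * are dispatched.
 */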
6454 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6455 				     struct sk_buff *skb)
6456 {
6457 	struct hci_conn *hcon = conn->hcon;
6458 	struct l2cap_cmd_hdr *cmd;
6459 	int err;
6460 
6461 	l2cap_raw_recv(conn, skb);
6462 
6463 	if (hcon->type != ACL_LINK)
6464 		goto drop;
6465 
6466 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6467 		u16 len;
6468 
6469 		cmd = (void *) skb->data;
6470 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6471 
6472 		len = le16_to_cpu(cmd->len);
6473 
6474 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6475 		       cmd->ident);
6476 
6477 		if (len > skb->len || !cmd->ident) {
6478 			BT_DBG("corrupted command");
6479 			break;
6480 		}
6481 
6482 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6483 		if (err) {
6484 			struct l2cap_cmd_rej_unk rej;
6485 
6486 			BT_ERR("Wrong link type (%d)", err);
6487 
6488 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6489 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6490 				       sizeof(rej), &rej);
6491 		}
6492 
6493 		skb_pull(skb, len);
6494 	}
6495 
6496 drop:
6497 	kfree_skb(skb);
6498 }
6499 
6500 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6501 {
6502 	u16 our_fcs, rcv_fcs;
6503 	int hdr_size;
6504 
6505 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6506 		hdr_size = L2CAP_EXT_HDR_SIZE;
6507 	else
6508 		hdr_size = L2CAP_ENH_HDR_SIZE;
6509 
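	/* The FCS trails the PDU.  skb_trim() removes it from skb->len but
	 * the two bytes are still in the buffer, so they can be read back at
	 * skb->data + skb->len.  The CRC is then recomputed over the basic
	 * header and control field (hdr_size bytes before skb->data) plus
	 * the remaining payload.
	 */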
6510 	if (chan->fcs == L2CAP_FCS_CRC16) {
6511 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6512 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6513 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6514 
6515 		if (our_fcs != rcv_fcs)
6516 			return -EBADMSG;
6517 	}
6518 	return 0;
6519 }
6520 
6521 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6522 {
6523 	struct l2cap_ctrl control;
6524 
6525 	BT_DBG("chan %p", chan);
6526 
6527 	memset(&control, 0, sizeof(control));
6528 	control.sframe = 1;
6529 	control.final = 1;
6530 	control.reqseq = chan->buffer_seq;
6531 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6532 
6533 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6534 		control.super = L2CAP_SUPER_RNR;
6535 		l2cap_send_sframe(chan, &control);
6536 	}
6537 
6538 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6539 	    chan->unacked_frames > 0)
6540 		__set_retrans_timer(chan);
6541 
6542 	/* Send pending iframes */
6543 	l2cap_ertm_send(chan);
6544 
6545 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6546 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6547 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6548 		 * send it now.
6549 		 */
6550 		control.super = L2CAP_SUPER_RR;
6551 		l2cap_send_sframe(chan, &control);
6552 	}
6553 }
6554 
6555 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6556 			    struct sk_buff **last_frag)
6557 {
6558 	/* skb->len reflects data in skb as well as all fragments
6559 	 * skb->data_len reflects only data in fragments
6560 	 */
6561 	if (!skb_has_frag_list(skb))
6562 		skb_shinfo(skb)->frag_list = new_frag;
6563 
6564 	new_frag->next = NULL;
6565 
6566 	(*last_frag)->next = new_frag;
6567 	*last_frag = new_frag;
6568 
6569 	skb->len += new_frag->len;
6570 	skb->data_len += new_frag->len;
6571 	skb->truesize += new_frag->truesize;
6572 }
6573 
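/* Reassemble an SDU from SAR-tagged I-frame payloads.  An unsegmented
 * frame is delivered directly; a start fragment pulls the 2-byte SDU
 * length, is checked against the local MTU and becomes chan->sdu;
 * continuation and end fragments are appended to it as a frag list.
 * On any error both the current fragment and the partial SDU are
 * freed.
 */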
6574 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6575 				struct l2cap_ctrl *control)
6576 {
6577 	int err = -EINVAL;
6578 
6579 	switch (control->sar) {
6580 	case L2CAP_SAR_UNSEGMENTED:
6581 		if (chan->sdu)
6582 			break;
6583 
6584 		err = chan->ops->recv(chan, skb);
6585 		break;
6586 
6587 	case L2CAP_SAR_START:
6588 		if (chan->sdu)
6589 			break;
6590 
6591 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6592 			break;
6593 
6594 		chan->sdu_len = get_unaligned_le16(skb->data);
6595 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6596 
6597 		if (chan->sdu_len > chan->imtu) {
6598 			err = -EMSGSIZE;
6599 			break;
6600 		}
6601 
6602 		if (skb->len >= chan->sdu_len)
6603 			break;
6604 
6605 		chan->sdu = skb;
6606 		chan->sdu_last_frag = skb;
6607 
6608 		skb = NULL;
6609 		err = 0;
6610 		break;
6611 
6612 	case L2CAP_SAR_CONTINUE:
6613 		if (!chan->sdu)
6614 			break;
6615 
6616 		append_skb_frag(chan->sdu, skb,
6617 				&chan->sdu_last_frag);
6618 		skb = NULL;
6619 
6620 		if (chan->sdu->len >= chan->sdu_len)
6621 			break;
6622 
6623 		err = 0;
6624 		break;
6625 
6626 	case L2CAP_SAR_END:
6627 		if (!chan->sdu)
6628 			break;
6629 
6630 		append_skb_frag(chan->sdu, skb,
6631 				&chan->sdu_last_frag);
6632 		skb = NULL;
6633 
6634 		if (chan->sdu->len != chan->sdu_len)
6635 			break;
6636 
6637 		err = chan->ops->recv(chan, chan->sdu);
6638 
6639 		if (!err) {
6640 			/* Reassembly complete */
6641 			chan->sdu = NULL;
6642 			chan->sdu_last_frag = NULL;
6643 			chan->sdu_len = 0;
6644 		}
6645 		break;
6646 	}
6647 
6648 	if (err) {
6649 		kfree_skb(skb);
6650 		kfree_skb(chan->sdu);
6651 		chan->sdu = NULL;
6652 		chan->sdu_last_frag = NULL;
6653 		chan->sdu_len = 0;
6654 	}
6655 
6656 	return err;
6657 }
6658 
6659 static int l2cap_resegment(struct l2cap_chan *chan)
6660 {
6661 	/* Placeholder */
6662 	return 0;
6663 }
6664 
6665 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6666 {
6667 	u8 event;
6668 
6669 	if (chan->mode != L2CAP_MODE_ERTM)
6670 		return;
6671 
6672 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6673 	l2cap_tx(chan, NULL, NULL, event);
6674 }
6675 
6676 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6677 {
6678 	int err = 0;
6679 	/* Pass sequential frames to l2cap_reassemble_sdu()
6680 	 * until a gap is encountered.
6681 	 */
6682 
6683 	BT_DBG("chan %p", chan);
6684 
6685 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6686 		struct sk_buff *skb;
6687 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6688 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6689 
6690 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6691 
6692 		if (!skb)
6693 			break;
6694 
6695 		skb_unlink(skb, &chan->srej_q);
6696 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6697 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6698 		if (err)
6699 			break;
6700 	}
6701 
6702 	if (skb_queue_empty(&chan->srej_q)) {
6703 		chan->rx_state = L2CAP_RX_STATE_RECV;
6704 		l2cap_send_ack(chan);
6705 	}
6706 
6707 	return err;
6708 }
6709 
6710 static void l2cap_handle_srej(struct l2cap_chan *chan,
6711 			      struct l2cap_ctrl *control)
6712 {
6713 	struct sk_buff *skb;
6714 
6715 	BT_DBG("chan %p, control %p", chan, control);
6716 
6717 	if (control->reqseq == chan->next_tx_seq) {
6718 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6719 		l2cap_send_disconn_req(chan, ECONNRESET);
6720 		return;
6721 	}
6722 
6723 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6724 
6725 	if (skb == NULL) {
6726 		BT_DBG("Seq %d not available for retransmission",
6727 		       control->reqseq);
6728 		return;
6729 	}
6730 
6731 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6732 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6733 		l2cap_send_disconn_req(chan, ECONNRESET);
6734 		return;
6735 	}
6736 
6737 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6738 
6739 	if (control->poll) {
6740 		l2cap_pass_to_tx(chan, control);
6741 
6742 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6743 		l2cap_retransmit(chan, control);
6744 		l2cap_ertm_send(chan);
6745 
6746 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6747 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6748 			chan->srej_save_reqseq = control->reqseq;
6749 		}
6750 	} else {
6751 		l2cap_pass_to_tx_fbit(chan, control);
6752 
6753 		if (control->final) {
6754 			if (chan->srej_save_reqseq != control->reqseq ||
6755 			    !test_and_clear_bit(CONN_SREJ_ACT,
6756 						&chan->conn_state))
6757 				l2cap_retransmit(chan, control);
6758 		} else {
6759 			l2cap_retransmit(chan, control);
6760 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6761 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6762 				chan->srej_save_reqseq = control->reqseq;
6763 			}
6764 		}
6765 	}
6766 }
6767 
6768 static void l2cap_handle_rej(struct l2cap_chan *chan,
6769 			     struct l2cap_ctrl *control)
6770 {
6771 	struct sk_buff *skb;
6772 
6773 	BT_DBG("chan %p, control %p", chan, control);
6774 
6775 	if (control->reqseq == chan->next_tx_seq) {
6776 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6777 		l2cap_send_disconn_req(chan, ECONNRESET);
6778 		return;
6779 	}
6780 
6781 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6782 
6783 	if (chan->max_tx && skb &&
6784 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6785 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6786 		l2cap_send_disconn_req(chan, ECONNRESET);
6787 		return;
6788 	}
6789 
6790 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6791 
6792 	l2cap_pass_to_tx(chan, control);
6793 
6794 	if (control->final) {
6795 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6796 			l2cap_retransmit_all(chan, control);
6797 	} else {
6798 		l2cap_retransmit_all(chan, control);
6799 		l2cap_ertm_send(chan);
6800 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6801 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6802 	}
6803 }
6804 
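/* Classify a received TxSeq relative to expected_tx_seq, last_acked_seq
 * and the negotiated tx window.  The result tells the RX state machines
 * whether the frame is the next expected one, fills (or duplicates) an
 * outstanding SREJ, indicates missing frames, or is invalid.
 */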
6805 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6806 {
6807 	BT_DBG("chan %p, txseq %d", chan, txseq);
6808 
6809 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6810 	       chan->expected_tx_seq);
6811 
6812 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6813 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6814 		    chan->tx_win) {
6815 			/* See notes below regarding "double poll" and
6816 			 * invalid packets.
6817 			 */
6818 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6819 				BT_DBG("Invalid/Ignore - after SREJ");
6820 				return L2CAP_TXSEQ_INVALID_IGNORE;
6821 			} else {
6822 				BT_DBG("Invalid - in window after SREJ sent");
6823 				return L2CAP_TXSEQ_INVALID;
6824 			}
6825 		}
6826 
6827 		if (chan->srej_list.head == txseq) {
6828 			BT_DBG("Expected SREJ");
6829 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6830 		}
6831 
6832 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6833 			BT_DBG("Duplicate SREJ - txseq already stored");
6834 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6835 		}
6836 
6837 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6838 			BT_DBG("Unexpected SREJ - not requested");
6839 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6840 		}
6841 	}
6842 
6843 	if (chan->expected_tx_seq == txseq) {
6844 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6845 		    chan->tx_win) {
6846 			BT_DBG("Invalid - txseq outside tx window");
6847 			return L2CAP_TXSEQ_INVALID;
6848 		} else {
6849 			BT_DBG("Expected");
6850 			return L2CAP_TXSEQ_EXPECTED;
6851 		}
6852 	}
6853 
6854 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6855 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6856 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6857 		return L2CAP_TXSEQ_DUPLICATE;
6858 	}
6859 
6860 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6861 		/* A source of invalid packets is a "double poll" condition,
6862 		 * where delays cause us to send multiple poll packets.  If
6863 		 * the remote stack receives and processes both polls,
6864 		 * sequence numbers can wrap around in such a way that a
6865 		 * resent frame has a sequence number that looks like new data
6866 		 * with a sequence gap.  This would trigger an erroneous SREJ
6867 		 * request.
6868 		 *
6869 		 * Fortunately, this is impossible with a tx window that's
6870 		 * no larger than half of the sequence space, which allows
6871 		 * invalid frames to be safely ignored.
6872 		 *
6873 		 * With a tx window greater than half of the sequence
6874 		 * space, the frame is invalid and cannot be ignored.  This
6875 		 * causes a disconnect.
6876 		 */
6877 
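		/* Illustrative example (modulo-64 sequence numbers, so
		 * tx_win_max is 63 and the threshold below is 32): with
		 * tx_win = 32 and last_acked_seq = 10, new data may only
		 * use txseq 10..41 (offsets 0..31).  A stale retransmission
		 * is at most 32 frames old, so its offset is always >= 32
		 * and it is ignored.  With tx_win = 40, a frame 30 behind
		 * last_acked_seq has offset 34, which lands inside the
		 * window and is indistinguishable from new data with a gap,
		 * so such frames must be treated as invalid instead.
		 */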
6878 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6879 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6880 			return L2CAP_TXSEQ_INVALID_IGNORE;
6881 		} else {
6882 			BT_DBG("Invalid - txseq outside tx window");
6883 			return L2CAP_TXSEQ_INVALID;
6884 		}
6885 	} else {
6886 		BT_DBG("Unexpected - txseq indicates missing frames");
6887 		return L2CAP_TXSEQ_UNEXPECTED;
6888 	}
6889 }
6890 
6891 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6892 			       struct l2cap_ctrl *control,
6893 			       struct sk_buff *skb, u8 event)
6894 {
6895 	struct l2cap_ctrl local_control;
6896 	int err = 0;
6897 	bool skb_in_use = false;
6898 
6899 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6900 	       event);
6901 
6902 	switch (event) {
6903 	case L2CAP_EV_RECV_IFRAME:
6904 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6905 		case L2CAP_TXSEQ_EXPECTED:
6906 			l2cap_pass_to_tx(chan, control);
6907 
6908 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6909 				BT_DBG("Busy, discarding expected seq %d",
6910 				       control->txseq);
6911 				break;
6912 			}
6913 
6914 			chan->expected_tx_seq = __next_seq(chan,
6915 							   control->txseq);
6916 
6917 			chan->buffer_seq = chan->expected_tx_seq;
6918 			skb_in_use = true;
6919 
6920 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6921 			 * control, so make a copy in advance to use it after
6922 			 * l2cap_reassemble_sdu returns and to avoid the race
6923 			 * condition, for example:
6924 			 *
6925 			 * The current thread calls:
6926 			 *   l2cap_reassemble_sdu
6927 			 *     chan->ops->recv == l2cap_sock_recv_cb
6928 			 *       __sock_queue_rcv_skb
6929 			 * Another thread calls:
6930 			 *   bt_sock_recvmsg
6931 			 *     skb_recv_datagram
6932 			 *     skb_free_datagram
6933 			 * Then the current thread tries to access control, but
6934 			 * it was freed by skb_free_datagram.
6935 			 */
6936 			local_control = *control;
6937 			err = l2cap_reassemble_sdu(chan, skb, control);
6938 			if (err)
6939 				break;
6940 
6941 			if (local_control.final) {
6942 				if (!test_and_clear_bit(CONN_REJ_ACT,
6943 							&chan->conn_state)) {
6944 					local_control.final = 0;
6945 					l2cap_retransmit_all(chan, &local_control);
6946 					l2cap_ertm_send(chan);
6947 				}
6948 			}
6949 
6950 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6951 				l2cap_send_ack(chan);
6952 			break;
6953 		case L2CAP_TXSEQ_UNEXPECTED:
6954 			l2cap_pass_to_tx(chan, control);
6955 
6956 			/* Can't issue SREJ frames in the local busy state.
6957 			 * Drop this frame, it will be seen as missing
6958 			 * when local busy is exited.
6959 			 */
6960 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6961 				BT_DBG("Busy, discarding unexpected seq %d",
6962 				       control->txseq);
6963 				break;
6964 			}
6965 
6966 			/* There was a gap in the sequence, so an SREJ
6967 			 * must be sent for each missing frame.  The
6968 			 * current frame is stored for later use.
6969 			 */
6970 			skb_queue_tail(&chan->srej_q, skb);
6971 			skb_in_use = true;
6972 			BT_DBG("Queued %p (queue len %d)", skb,
6973 			       skb_queue_len(&chan->srej_q));
6974 
6975 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6976 			l2cap_seq_list_clear(&chan->srej_list);
6977 			l2cap_send_srej(chan, control->txseq);
6978 
6979 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6980 			break;
6981 		case L2CAP_TXSEQ_DUPLICATE:
6982 			l2cap_pass_to_tx(chan, control);
6983 			break;
6984 		case L2CAP_TXSEQ_INVALID_IGNORE:
6985 			break;
6986 		case L2CAP_TXSEQ_INVALID:
6987 		default:
6988 			l2cap_send_disconn_req(chan, ECONNRESET);
6989 			break;
6990 		}
6991 		break;
6992 	case L2CAP_EV_RECV_RR:
6993 		l2cap_pass_to_tx(chan, control);
6994 		if (control->final) {
6995 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6996 
6997 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6998 			    !__chan_is_moving(chan)) {
6999 				control->final = 0;
7000 				l2cap_retransmit_all(chan, control);
7001 			}
7002 
7003 			l2cap_ertm_send(chan);
7004 		} else if (control->poll) {
7005 			l2cap_send_i_or_rr_or_rnr(chan);
7006 		} else {
7007 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7008 					       &chan->conn_state) &&
7009 			    chan->unacked_frames)
7010 				__set_retrans_timer(chan);
7011 
7012 			l2cap_ertm_send(chan);
7013 		}
7014 		break;
7015 	case L2CAP_EV_RECV_RNR:
7016 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7017 		l2cap_pass_to_tx(chan, control);
7018 		if (control && control->poll) {
7019 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7020 			l2cap_send_rr_or_rnr(chan, 0);
7021 		}
7022 		__clear_retrans_timer(chan);
7023 		l2cap_seq_list_clear(&chan->retrans_list);
7024 		break;
7025 	case L2CAP_EV_RECV_REJ:
7026 		l2cap_handle_rej(chan, control);
7027 		break;
7028 	case L2CAP_EV_RECV_SREJ:
7029 		l2cap_handle_srej(chan, control);
7030 		break;
7031 	default:
7032 		break;
7033 	}
7034 
7035 	if (skb && !skb_in_use) {
7036 		BT_DBG("Freeing %p", skb);
7037 		kfree_skb(skb);
7038 	}
7039 
7040 	return err;
7041 }
7042 
7043 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7044 				    struct l2cap_ctrl *control,
7045 				    struct sk_buff *skb, u8 event)
7046 {
7047 	int err = 0;
7048 	u16 txseq = control->txseq;
7049 	bool skb_in_use = false;
7050 
7051 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7052 	       event);
7053 
7054 	switch (event) {
7055 	case L2CAP_EV_RECV_IFRAME:
7056 		switch (l2cap_classify_txseq(chan, txseq)) {
7057 		case L2CAP_TXSEQ_EXPECTED:
7058 			/* Keep frame for reassembly later */
7059 			l2cap_pass_to_tx(chan, control);
7060 			skb_queue_tail(&chan->srej_q, skb);
7061 			skb_in_use = true;
7062 			BT_DBG("Queued %p (queue len %d)", skb,
7063 			       skb_queue_len(&chan->srej_q));
7064 
7065 			chan->expected_tx_seq = __next_seq(chan, txseq);
7066 			break;
7067 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7068 			l2cap_seq_list_pop(&chan->srej_list);
7069 
7070 			l2cap_pass_to_tx(chan, control);
7071 			skb_queue_tail(&chan->srej_q, skb);
7072 			skb_in_use = true;
7073 			BT_DBG("Queued %p (queue len %d)", skb,
7074 			       skb_queue_len(&chan->srej_q));
7075 
7076 			err = l2cap_rx_queued_iframes(chan);
7077 			if (err)
7078 				break;
7079 
7080 			break;
7081 		case L2CAP_TXSEQ_UNEXPECTED:
7082 			/* Got a frame that can't be reassembled yet.
7083 			 * Save it for later, and send SREJs to cover
7084 			 * the missing frames.
7085 			 */
7086 			skb_queue_tail(&chan->srej_q, skb);
7087 			skb_in_use = true;
7088 			BT_DBG("Queued %p (queue len %d)", skb,
7089 			       skb_queue_len(&chan->srej_q));
7090 
7091 			l2cap_pass_to_tx(chan, control);
7092 			l2cap_send_srej(chan, control->txseq);
7093 			break;
7094 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7095 			/* This frame was requested with an SREJ, but
7096 			 * some expected retransmitted frames are
7097 			 * missing.  Request retransmission of missing
7098 			 * SREJ'd frames.
7099 			 */
7100 			skb_queue_tail(&chan->srej_q, skb);
7101 			skb_in_use = true;
7102 			BT_DBG("Queued %p (queue len %d)", skb,
7103 			       skb_queue_len(&chan->srej_q));
7104 
7105 			l2cap_pass_to_tx(chan, control);
7106 			l2cap_send_srej_list(chan, control->txseq);
7107 			break;
7108 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7109 			/* We've already queued this frame.  Drop this copy. */
7110 			l2cap_pass_to_tx(chan, control);
7111 			break;
7112 		case L2CAP_TXSEQ_DUPLICATE:
7113 			/* Expecting a later sequence number, so this frame
7114 			 * was already received.  Ignore it completely.
7115 			 */
7116 			break;
7117 		case L2CAP_TXSEQ_INVALID_IGNORE:
7118 			break;
7119 		case L2CAP_TXSEQ_INVALID:
7120 		default:
7121 			l2cap_send_disconn_req(chan, ECONNRESET);
7122 			break;
7123 		}
7124 		break;
7125 	case L2CAP_EV_RECV_RR:
7126 		l2cap_pass_to_tx(chan, control);
7127 		if (control->final) {
7128 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7129 
7130 			if (!test_and_clear_bit(CONN_REJ_ACT,
7131 						&chan->conn_state)) {
7132 				control->final = 0;
7133 				l2cap_retransmit_all(chan, control);
7134 			}
7135 
7136 			l2cap_ertm_send(chan);
7137 		} else if (control->poll) {
7138 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7139 					       &chan->conn_state) &&
7140 			    chan->unacked_frames) {
7141 				__set_retrans_timer(chan);
7142 			}
7143 
7144 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7145 			l2cap_send_srej_tail(chan);
7146 		} else {
7147 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7148 					       &chan->conn_state) &&
7149 			    chan->unacked_frames)
7150 				__set_retrans_timer(chan);
7151 
7152 			l2cap_send_ack(chan);
7153 		}
7154 		break;
7155 	case L2CAP_EV_RECV_RNR:
7156 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7157 		l2cap_pass_to_tx(chan, control);
7158 		if (control->poll) {
7159 			l2cap_send_srej_tail(chan);
7160 		} else {
7161 			struct l2cap_ctrl rr_control;
7162 			memset(&rr_control, 0, sizeof(rr_control));
7163 			rr_control.sframe = 1;
7164 			rr_control.super = L2CAP_SUPER_RR;
7165 			rr_control.reqseq = chan->buffer_seq;
7166 			l2cap_send_sframe(chan, &rr_control);
7167 		}
7168 
7169 		break;
7170 	case L2CAP_EV_RECV_REJ:
7171 		l2cap_handle_rej(chan, control);
7172 		break;
7173 	case L2CAP_EV_RECV_SREJ:
7174 		l2cap_handle_srej(chan, control);
7175 		break;
7176 	}
7177 
7178 	if (skb && !skb_in_use) {
7179 		BT_DBG("Freeing %p", skb);
7180 		kfree_skb(skb);
7181 	}
7182 
7183 	return err;
7184 }
7185 
7186 static int l2cap_finish_move(struct l2cap_chan *chan)
7187 {
7188 	BT_DBG("chan %p", chan);
7189 
7190 	chan->rx_state = L2CAP_RX_STATE_RECV;
7191 
7192 	if (chan->hs_hcon)
7193 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7194 	else
7195 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7196 
7197 	return l2cap_resegment(chan);
7198 }
7199 
7200 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7201 				 struct l2cap_ctrl *control,
7202 				 struct sk_buff *skb, u8 event)
7203 {
7204 	int err;
7205 
7206 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7207 	       event);
7208 
7209 	if (!control->poll)
7210 		return -EPROTO;
7211 
7212 	l2cap_process_reqseq(chan, control->reqseq);
7213 
7214 	if (!skb_queue_empty(&chan->tx_q))
7215 		chan->tx_send_head = skb_peek(&chan->tx_q);
7216 	else
7217 		chan->tx_send_head = NULL;
7218 
7219 	/* Rewind next_tx_seq to the point expected
7220 	 * by the receiver.
7221 	 */
7222 	chan->next_tx_seq = control->reqseq;
7223 	chan->unacked_frames = 0;
7224 
7225 	err = l2cap_finish_move(chan);
7226 	if (err)
7227 		return err;
7228 
7229 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7230 	l2cap_send_i_or_rr_or_rnr(chan);
7231 
7232 	if (event == L2CAP_EV_RECV_IFRAME)
7233 		return -EPROTO;
7234 
7235 	return l2cap_rx_state_recv(chan, control, NULL, event);
7236 }
7237 
7238 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7239 				 struct l2cap_ctrl *control,
7240 				 struct sk_buff *skb, u8 event)
7241 {
7242 	int err;
7243 
7244 	if (!control->final)
7245 		return -EPROTO;
7246 
7247 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7248 
7249 	chan->rx_state = L2CAP_RX_STATE_RECV;
7250 	l2cap_process_reqseq(chan, control->reqseq);
7251 
7252 	if (!skb_queue_empty(&chan->tx_q))
7253 		chan->tx_send_head = skb_peek(&chan->tx_q);
7254 	else
7255 		chan->tx_send_head = NULL;
7256 
7257 	/* Rewind next_tx_seq to the point expected
7258 	 * by the receiver.
7259 	 */
7260 	chan->next_tx_seq = control->reqseq;
7261 	chan->unacked_frames = 0;
7262 
7263 	if (chan->hs_hcon)
7264 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7265 	else
7266 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7267 
7268 	err = l2cap_resegment(chan);
7269 
7270 	if (!err)
7271 		err = l2cap_rx_state_recv(chan, control, skb, event);
7272 
7273 	return err;
7274 }
7275 
7276 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7277 {
7278 	/* Make sure reqseq is for a packet that has been sent but not acked */
7279 	u16 unacked;
7280 
7281 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7282 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7283 }
7284 
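/* Top-level ERTM receive state machine.  The ReqSeq of every incoming
 * frame may only acknowledge frames that have been sent but not yet
 * acked; anything else is treated as a protocol error and tears the
 * channel down.  Valid events are routed to the handler for the
 * current rx_state (RECV, SREJ_SENT, WAIT_P or WAIT_F).
 */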
7285 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7286 		    struct sk_buff *skb, u8 event)
7287 {
7288 	int err = 0;
7289 
7290 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7291 	       control, skb, event, chan->rx_state);
7292 
7293 	if (__valid_reqseq(chan, control->reqseq)) {
7294 		switch (chan->rx_state) {
7295 		case L2CAP_RX_STATE_RECV:
7296 			err = l2cap_rx_state_recv(chan, control, skb, event);
7297 			break;
7298 		case L2CAP_RX_STATE_SREJ_SENT:
7299 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7300 						       event);
7301 			break;
7302 		case L2CAP_RX_STATE_WAIT_P:
7303 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7304 			break;
7305 		case L2CAP_RX_STATE_WAIT_F:
7306 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7307 			break;
7308 		default:
7309 			/* shut it down */
7310 			break;
7311 		}
7312 	} else {
7313 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7314 		       control->reqseq, chan->next_tx_seq,
7315 		       chan->expected_ack_seq);
7316 		l2cap_send_disconn_req(chan, ECONNRESET);
7317 	}
7318 
7319 	return err;
7320 }
7321 
7322 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7323 			   struct sk_buff *skb)
7324 {
7325 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7326 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7327 	 * returns and to avoid the race condition, for example:
7328 	 *
7329 	 * The current thread calls:
7330 	 *   l2cap_reassemble_sdu
7331 	 *     chan->ops->recv == l2cap_sock_recv_cb
7332 	 *       __sock_queue_rcv_skb
7333 	 * Another thread calls:
7334 	 *   bt_sock_recvmsg
7335 	 *     skb_recv_datagram
7336 	 *     skb_free_datagram
7337 	 * Then the current thread tries to access control, but it was freed by
7338 	 * skb_free_datagram.
7339 	 */
7340 	u16 txseq = control->txseq;
7341 
7342 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7343 	       chan->rx_state);
7344 
7345 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7346 		l2cap_pass_to_tx(chan, control);
7347 
7348 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7349 		       __next_seq(chan, chan->buffer_seq));
7350 
7351 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7352 
7353 		l2cap_reassemble_sdu(chan, skb, control);
7354 	} else {
7355 		if (chan->sdu) {
7356 			kfree_skb(chan->sdu);
7357 			chan->sdu = NULL;
7358 		}
7359 		chan->sdu_last_frag = NULL;
7360 		chan->sdu_len = 0;
7361 
7362 		if (skb) {
7363 			BT_DBG("Freeing %p", skb);
7364 			kfree_skb(skb);
7365 		}
7366 	}
7367 
7368 	chan->last_acked_seq = txseq;
7369 	chan->expected_tx_seq = __next_seq(chan, txseq);
7370 
7371 	return 0;
7372 }
7373 
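/* Entry point for ERTM and streaming mode PDUs.  The control field is
 * unpacked, the FCS (if any) is verified and the payload length is
 * checked against the MPS.  I-frames are fed into l2cap_rx() (or
 * l2cap_stream_rx() in streaming mode); S-frames are mapped to state
 * machine events via rx_func_to_event.  Processing errors result in a
 * disconnect request.
 */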
7374 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7375 {
7376 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7377 	u16 len;
7378 	u8 event;
7379 
7380 	__unpack_control(chan, skb);
7381 
7382 	len = skb->len;
7383 
7384 	/*
7385 	 * We can just drop the corrupted I-frame here.
7386 	 * Receiver will miss it and start proper recovery
7387 	 * procedures and ask for retransmission.
7388 	 */
7389 	if (l2cap_check_fcs(chan, skb))
7390 		goto drop;
7391 
7392 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7393 		len -= L2CAP_SDULEN_SIZE;
7394 
7395 	if (chan->fcs == L2CAP_FCS_CRC16)
7396 		len -= L2CAP_FCS_SIZE;
7397 
7398 	if (len > chan->mps) {
7399 		l2cap_send_disconn_req(chan, ECONNRESET);
7400 		goto drop;
7401 	}
7402 
7403 	if (chan->ops->filter) {
7404 		if (chan->ops->filter(chan, skb))
7405 			goto drop;
7406 	}
7407 
7408 	if (!control->sframe) {
7409 		int err;
7410 
7411 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7412 		       control->sar, control->reqseq, control->final,
7413 		       control->txseq);
7414 
7415 		/* Validate F-bit - F=0 always valid, F=1 only
7416 		 * valid in TX WAIT_F
7417 		 */
7418 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7419 			goto drop;
7420 
7421 		if (chan->mode != L2CAP_MODE_STREAMING) {
7422 			event = L2CAP_EV_RECV_IFRAME;
7423 			err = l2cap_rx(chan, control, skb, event);
7424 		} else {
7425 			err = l2cap_stream_rx(chan, control, skb);
7426 		}
7427 
7428 		if (err)
7429 			l2cap_send_disconn_req(chan, ECONNRESET);
7430 	} else {
7431 		const u8 rx_func_to_event[4] = {
7432 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7433 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7434 		};
7435 
7436 		/* Only I-frames are expected in streaming mode */
7437 		if (chan->mode == L2CAP_MODE_STREAMING)
7438 			goto drop;
7439 
7440 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7441 		       control->reqseq, control->final, control->poll,
7442 		       control->super);
7443 
7444 		if (len != 0) {
7445 			BT_ERR("Trailing bytes: %d in sframe", len);
7446 			l2cap_send_disconn_req(chan, ECONNRESET);
7447 			goto drop;
7448 		}
7449 
7450 		/* Validate F and P bits */
7451 		if (control->final && (control->poll ||
7452 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7453 			goto drop;
7454 
7455 		event = rx_func_to_event[control->super];
7456 		if (l2cap_rx(chan, control, skb, event))
7457 			l2cap_send_disconn_req(chan, ECONNRESET);
7458 	}
7459 
7460 	return 0;
7461 
7462 drop:
7463 	kfree_skb(skb);
7464 	return 0;
7465 }
7466 
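/* Return credits to the remote device so that it can keep sending.  The
 * target is one credit per MPS-sized fragment of the MTU plus one
 * (e.g. imtu 512 and mps 128 give a target of 5); if rx_credits has
 * fallen below that, an L2CAP_LE_CREDITS packet tops it back up.
 */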
7467 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7468 {
7469 	struct l2cap_conn *conn = chan->conn;
7470 	struct l2cap_le_credits pkt;
7471 	u16 return_credits;
7472 
7473 	return_credits = (chan->imtu / chan->mps) + 1;
7474 
7475 	if (chan->rx_credits >= return_credits)
7476 		return;
7477 
7478 	return_credits -= chan->rx_credits;
7479 
7480 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7481 
7482 	chan->rx_credits += return_credits;
7483 
7484 	pkt.cid     = cpu_to_le16(chan->scid);
7485 	pkt.credits = cpu_to_le16(return_credits);
7486 
7487 	chan->ident = l2cap_get_ident(conn);
7488 
7489 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7490 }
7491 
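/* A complete SDU has been reassembled: hand it to the channel's recv
 * callback and then replenish the remote device's credits.
 */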
7492 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7493 {
7494 	int err;
7495 
7496 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7497 
7498 	/* Wait for recv to confirm reception before updating the credits */
7499 	err = chan->ops->recv(chan, skb);
7500 
7501 	/* Update credits whenever an SDU is received */
7502 	l2cap_chan_le_send_credits(chan);
7503 
7504 	return err;
7505 }
7506 
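/* Per-PDU receive path for LE and enhanced credit based flow control.
 * Each PDU consumes one credit.  The first PDU of an SDU starts with a
 * 2-byte SDU length; subsequent PDUs are appended until sdu_len is
 * reached.  On error the skbs are freed here and 0 is returned so the
 * caller does not free them again.
 */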
7507 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7508 {
7509 	int err;
7510 
7511 	if (!chan->rx_credits) {
7512 		BT_ERR("No credits to receive LE L2CAP data");
7513 		l2cap_send_disconn_req(chan, ECONNRESET);
7514 		return -ENOBUFS;
7515 	}
7516 
7517 	if (chan->imtu < skb->len) {
7518 		BT_ERR("Too big LE L2CAP PDU");
7519 		return -ENOBUFS;
7520 	}
7521 
7522 	chan->rx_credits--;
7523 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7524 
7525 	/* Update if the remote has run out of credits; this should only happen
7526 	 * if the remote is not using the entire MPS.
7527 	 */
7528 	if (!chan->rx_credits)
7529 		l2cap_chan_le_send_credits(chan);
7530 
7531 	err = 0;
7532 
7533 	if (!chan->sdu) {
7534 		u16 sdu_len;
7535 
7536 		sdu_len = get_unaligned_le16(skb->data);
7537 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7538 
7539 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7540 		       sdu_len, skb->len, chan->imtu);
7541 
7542 		if (sdu_len > chan->imtu) {
7543 			BT_ERR("Too big LE L2CAP SDU length received");
7544 			err = -EMSGSIZE;
7545 			goto failed;
7546 		}
7547 
7548 		if (skb->len > sdu_len) {
7549 			BT_ERR("Too much LE L2CAP data received");
7550 			err = -EINVAL;
7551 			goto failed;
7552 		}
7553 
7554 		if (skb->len == sdu_len)
7555 			return l2cap_ecred_recv(chan, skb);
7556 
7557 		chan->sdu = skb;
7558 		chan->sdu_len = sdu_len;
7559 		chan->sdu_last_frag = skb;
7560 
7561 		/* Detect if remote is not able to use the selected MPS */
7562 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7563 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7564 
7565 			/* Adjust the number of credits */
7566 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7567 			chan->mps = mps_len;
7568 			l2cap_chan_le_send_credits(chan);
7569 		}
7570 
7571 		return 0;
7572 	}
7573 
7574 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7575 	       chan->sdu->len, skb->len, chan->sdu_len);
7576 
7577 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7578 		BT_ERR("Too much LE L2CAP data received");
7579 		err = -EINVAL;
7580 		goto failed;
7581 	}
7582 
7583 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7584 	skb = NULL;
7585 
7586 	if (chan->sdu->len == chan->sdu_len) {
7587 		err = l2cap_ecred_recv(chan, chan->sdu);
7588 		if (!err) {
7589 			chan->sdu = NULL;
7590 			chan->sdu_last_frag = NULL;
7591 			chan->sdu_len = 0;
7592 		}
7593 	}
7594 
7595 failed:
7596 	if (err) {
7597 		kfree_skb(skb);
7598 		kfree_skb(chan->sdu);
7599 		chan->sdu = NULL;
7600 		chan->sdu_last_frag = NULL;
7601 		chan->sdu_len = 0;
7602 	}
7603 
7604 	/* We can't return an error here since we took care of the skb
7605 	 * freeing internally. An error return would cause the caller to
7606 	 * do a double-free of the skb.
7607 	 */
7608 	return 0;
7609 }
7610 
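/* Route a PDU received on a dynamically allocated CID (or the A2MP
 * channel) to the owning channel and hand it to the mode specific
 * receive path.  The channel comes back locked and referenced, both of
 * which are released once the frame has been consumed or dropped.
 */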
7611 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7612 			       struct sk_buff *skb)
7613 {
7614 	struct l2cap_chan *chan;
7615 
7616 	chan = l2cap_get_chan_by_scid(conn, cid);
7617 	if (!chan) {
7618 		if (cid == L2CAP_CID_A2MP) {
7619 			chan = a2mp_channel_create(conn, skb);
7620 			if (!chan) {
7621 				kfree_skb(skb);
7622 				return;
7623 			}
7624 
7625 			l2cap_chan_hold(chan);
7626 			l2cap_chan_lock(chan);
7627 		} else {
7628 			BT_DBG("unknown cid 0x%4.4x", cid);
7629 			/* Drop packet and return */
7630 			kfree_skb(skb);
7631 			return;
7632 		}
7633 	}
7634 
7635 	BT_DBG("chan %p, len %d", chan, skb->len);
7636 
7637 	/* If we receive data on a fixed channel before the info req/rsp
7638 	 * procedure is done, simply assume that the channel is supported
7639 	 * and mark it as ready.
7640 	 */
7641 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7642 		l2cap_chan_ready(chan);
7643 
7644 	if (chan->state != BT_CONNECTED)
7645 		goto drop;
7646 
7647 	switch (chan->mode) {
7648 	case L2CAP_MODE_LE_FLOWCTL:
7649 	case L2CAP_MODE_EXT_FLOWCTL:
7650 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7651 			goto drop;
7652 
7653 		goto done;
7654 
7655 	case L2CAP_MODE_BASIC:
7656 		/* If the socket receive buffer overflows we drop data here,
7657 		 * which is *bad* because L2CAP has to be reliable.
7658 		 * But we don't have any other choice: basic mode L2CAP
7659 		 * doesn't provide a flow control mechanism. */
7660 
7661 		if (chan->imtu < skb->len) {
7662 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7663 			goto drop;
7664 		}
7665 
7666 		if (!chan->ops->recv(chan, skb))
7667 			goto done;
7668 		break;
7669 
7670 	case L2CAP_MODE_ERTM:
7671 	case L2CAP_MODE_STREAMING:
7672 		l2cap_data_rcv(chan, skb);
7673 		goto done;
7674 
7675 	default:
7676 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7677 		break;
7678 	}
7679 
7680 drop:
7681 	kfree_skb(skb);
7682 
7683 done:
7684 	l2cap_chan_unlock(chan);
7685 	l2cap_chan_put(chan);
7686 }
7687 
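/* Deliver a connectionless (G-frame) PDU to the global channel bound to
 * the given PSM.  The remote BD_ADDR and PSM are stashed in the skb
 * control block so that recvmsg() can fill in msg_name.
 */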
7688 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7689 				  struct sk_buff *skb)
7690 {
7691 	struct hci_conn *hcon = conn->hcon;
7692 	struct l2cap_chan *chan;
7693 
7694 	if (hcon->type != ACL_LINK)
7695 		goto free_skb;
7696 
7697 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7698 					ACL_LINK);
7699 	if (!chan)
7700 		goto free_skb;
7701 
7702 	BT_DBG("chan %p, len %d", chan, skb->len);
7703 
7704 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7705 		goto drop;
7706 
7707 	if (chan->imtu < skb->len)
7708 		goto drop;
7709 
7710 	/* Store remote BD_ADDR and PSM for msg_name */
7711 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7712 	bt_cb(skb)->l2cap.psm = psm;
7713 
7714 	if (!chan->ops->recv(chan, skb)) {
7715 		l2cap_chan_put(chan);
7716 		return;
7717 	}
7718 
7719 drop:
7720 	l2cap_chan_put(chan);
7721 free_skb:
7722 	kfree_skb(skb);
7723 }
7724 
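/* Top level demultiplexer for a complete L2CAP frame.  Frames arriving
 * before the HCI link is fully connected are queued on pending_rx and
 * replayed later; everything else is validated against the basic
 * header and then dispatched by CID.
 */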
7725 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7726 {
7727 	struct l2cap_hdr *lh = (void *) skb->data;
7728 	struct hci_conn *hcon = conn->hcon;
7729 	u16 cid, len;
7730 	__le16 psm;
7731 
7732 	if (hcon->state != BT_CONNECTED) {
7733 		BT_DBG("queueing pending rx skb");
7734 		skb_queue_tail(&conn->pending_rx, skb);
7735 		return;
7736 	}
7737 
7738 	skb_pull(skb, L2CAP_HDR_SIZE);
7739 	cid = __le16_to_cpu(lh->cid);
7740 	len = __le16_to_cpu(lh->len);
7741 
7742 	if (len != skb->len) {
7743 		kfree_skb(skb);
7744 		return;
7745 	}
7746 
7747 	/* Since we can't actively block incoming LE connections we must
7748 	 * at least ensure that we ignore incoming data from them.
7749 	 */
7750 	if (hcon->type == LE_LINK &&
7751 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7752 				   bdaddr_dst_type(hcon))) {
7753 		kfree_skb(skb);
7754 		return;
7755 	}
7756 
7757 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7758 
7759 	switch (cid) {
7760 	case L2CAP_CID_SIGNALING:
7761 		l2cap_sig_channel(conn, skb);
7762 		break;
7763 
7764 	case L2CAP_CID_CONN_LESS:
7765 		psm = get_unaligned((__le16 *) skb->data);
7766 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7767 		l2cap_conless_channel(conn, psm, skb);
7768 		break;
7769 
7770 	case L2CAP_CID_LE_SIGNALING:
7771 		l2cap_le_sig_channel(conn, skb);
7772 		break;
7773 
7774 	default:
7775 		l2cap_data_channel(conn, cid, skb);
7776 		break;
7777 	}
7778 }
7779 
7780 static void process_pending_rx(struct work_struct *work)
7781 {
7782 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7783 					       pending_rx_work);
7784 	struct sk_buff *skb;
7785 
7786 	BT_DBG("");
7787 
7788 	while ((skb = skb_dequeue(&conn->pending_rx)))
7789 		l2cap_recv_frame(conn, skb);
7790 }
7791 
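/* Create, or return the already existing, L2CAP connection object for
 * an HCI connection.  The outgoing MTU comes from the controller's LE
 * or ACL buffer size, and the fixed channel mask, locks, lists and work
 * items are initialised here.
 */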
7792 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7793 {
7794 	struct l2cap_conn *conn = hcon->l2cap_data;
7795 	struct hci_chan *hchan;
7796 
7797 	if (conn)
7798 		return conn;
7799 
7800 	hchan = hci_chan_create(hcon);
7801 	if (!hchan)
7802 		return NULL;
7803 
7804 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7805 	if (!conn) {
7806 		hci_chan_del(hchan);
7807 		return NULL;
7808 	}
7809 
7810 	kref_init(&conn->ref);
7811 	hcon->l2cap_data = conn;
7812 	conn->hcon = hci_conn_get(hcon);
7813 	conn->hchan = hchan;
7814 
7815 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7816 
7817 	switch (hcon->type) {
7818 	case LE_LINK:
7819 		if (hcon->hdev->le_mtu) {
7820 			conn->mtu = hcon->hdev->le_mtu;
7821 			break;
7822 		}
7823 		fallthrough;
7824 	default:
7825 		conn->mtu = hcon->hdev->acl_mtu;
7826 		break;
7827 	}
7828 
7829 	conn->feat_mask = 0;
7830 
7831 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7832 
7833 	if (hcon->type == ACL_LINK &&
7834 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7835 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7836 
7837 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7838 	    (bredr_sc_enabled(hcon->hdev) ||
7839 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7840 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7841 
7842 	mutex_init(&conn->ident_lock);
7843 	mutex_init(&conn->chan_lock);
7844 
7845 	INIT_LIST_HEAD(&conn->chan_l);
7846 	INIT_LIST_HEAD(&conn->users);
7847 
7848 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7849 
7850 	skb_queue_head_init(&conn->pending_rx);
7851 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7852 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7853 
7854 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7855 
7856 	return conn;
7857 }
7858 
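/* LE PSMs are a single octet (0x0001-0x00ff).  BR/EDR PSMs must have an
 * odd least significant octet and an even most significant octet, so
 * e.g. 0x0001 (SDP) is valid while 0x0002 is not.
 */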
7859 static bool is_valid_psm(u16 psm, u8 dst_type) {
7860 	if (!psm)
7861 		return false;
7862 
7863 	if (bdaddr_type_is_le(dst_type))
7864 		return (psm <= 0x00ff);
7865 
7866 	/* PSM must be odd and lsb of upper byte must be 0 */
7867 	return ((psm & 0x0101) == 0x0001);
7868 }
7869 
7870 struct l2cap_chan_data {
7871 	struct l2cap_chan *chan;
7872 	struct pid *pid;
7873 	int count;
7874 };
7875 
7876 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7877 {
7878 	struct l2cap_chan_data *d = data;
7879 	struct pid *pid;
7880 
7881 	if (chan == d->chan)
7882 		return;
7883 
7884 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7885 		return;
7886 
7887 	pid = chan->ops->get_peer_pid(chan);
7888 
7889 	/* Only count deferred channels with the same PID/PSM */
7890 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7891 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7892 		return;
7893 
7894 	d->count++;
7895 }
7896 
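/* Initiate an outgoing connection on a channel: validate the PSM, CID
 * and mode, create or reuse the underlying ACL or LE link, attach the
 * channel to the connection and, if the link is already up, kick off
 * configuration or mark the channel connected.
 */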
7897 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7898 		       bdaddr_t *dst, u8 dst_type)
7899 {
7900 	struct l2cap_conn *conn;
7901 	struct hci_conn *hcon;
7902 	struct hci_dev *hdev;
7903 	int err;
7904 
7905 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7906 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7907 
7908 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7909 	if (!hdev)
7910 		return -EHOSTUNREACH;
7911 
7912 	hci_dev_lock(hdev);
7913 
7914 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7915 	    chan->chan_type != L2CAP_CHAN_RAW) {
7916 		err = -EINVAL;
7917 		goto done;
7918 	}
7919 
7920 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7921 		err = -EINVAL;
7922 		goto done;
7923 	}
7924 
7925 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7926 		err = -EINVAL;
7927 		goto done;
7928 	}
7929 
7930 	switch (chan->mode) {
7931 	case L2CAP_MODE_BASIC:
7932 		break;
7933 	case L2CAP_MODE_LE_FLOWCTL:
7934 		break;
7935 	case L2CAP_MODE_EXT_FLOWCTL:
7936 		if (!enable_ecred) {
7937 			err = -EOPNOTSUPP;
7938 			goto done;
7939 		}
7940 		break;
7941 	case L2CAP_MODE_ERTM:
7942 	case L2CAP_MODE_STREAMING:
7943 		if (!disable_ertm)
7944 			break;
7945 		fallthrough;
7946 	default:
7947 		err = -EOPNOTSUPP;
7948 		goto done;
7949 	}
7950 
7951 	switch (chan->state) {
7952 	case BT_CONNECT:
7953 	case BT_CONNECT2:
7954 	case BT_CONFIG:
7955 		/* Already connecting */
7956 		err = 0;
7957 		goto done;
7958 
7959 	case BT_CONNECTED:
7960 		/* Already connected */
7961 		err = -EISCONN;
7962 		goto done;
7963 
7964 	case BT_OPEN:
7965 	case BT_BOUND:
7966 		/* Can connect */
7967 		break;
7968 
7969 	default:
7970 		err = -EBADFD;
7971 		goto done;
7972 	}
7973 
7974 	/* Set destination address and psm */
7975 	bacpy(&chan->dst, dst);
7976 	chan->dst_type = dst_type;
7977 
7978 	chan->psm = psm;
7979 	chan->dcid = cid;
7980 
7981 	if (bdaddr_type_is_le(dst_type)) {
7982 		/* Convert from L2CAP channel address type to HCI address type
7983 		 */
7984 		if (dst_type == BDADDR_LE_PUBLIC)
7985 			dst_type = ADDR_LE_DEV_PUBLIC;
7986 		else
7987 			dst_type = ADDR_LE_DEV_RANDOM;
7988 
7989 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7990 			hcon = hci_connect_le(hdev, dst, dst_type,
7991 					      chan->sec_level,
7992 					      HCI_LE_CONN_TIMEOUT,
7993 					      HCI_ROLE_SLAVE, NULL);
7994 		else
7995 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7996 						   chan->sec_level,
7997 						   HCI_LE_CONN_TIMEOUT,
7998 						   CONN_REASON_L2CAP_CHAN);
7999 
8000 	} else {
8001 		u8 auth_type = l2cap_get_auth_type(chan);
8002 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8003 				       CONN_REASON_L2CAP_CHAN);
8004 	}
8005 
8006 	if (IS_ERR(hcon)) {
8007 		err = PTR_ERR(hcon);
8008 		goto done;
8009 	}
8010 
8011 	conn = l2cap_conn_add(hcon);
8012 	if (!conn) {
8013 		hci_conn_drop(hcon);
8014 		err = -ENOMEM;
8015 		goto done;
8016 	}
8017 
8018 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8019 		struct l2cap_chan_data data;
8020 
8021 		data.chan = chan;
8022 		data.pid = chan->ops->get_peer_pid(chan);
8023 		data.count = 1;
8024 
8025 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8026 
8027 		/* Check that there aren't too many channels being connected */
8028 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8029 			hci_conn_drop(hcon);
8030 			err = -EPROTO;
8031 			goto done;
8032 		}
8033 	}
8034 
8035 	mutex_lock(&conn->chan_lock);
8036 	l2cap_chan_lock(chan);
8037 
8038 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8039 		hci_conn_drop(hcon);
8040 		err = -EBUSY;
8041 		goto chan_unlock;
8042 	}
8043 
8044 	/* Update source addr of the socket */
8045 	bacpy(&chan->src, &hcon->src);
8046 	chan->src_type = bdaddr_src_type(hcon);
8047 
8048 	__l2cap_chan_add(conn, chan);
8049 
8050 	/* l2cap_chan_add takes its own ref so we can drop this one */
8051 	hci_conn_drop(hcon);
8052 
8053 	l2cap_state_change(chan, BT_CONNECT);
8054 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8055 
8056 	/* Release chan->sport so that it can be reused by other
8057 	 * sockets (as it's only used for listening sockets).
8058 	 */
8059 	write_lock(&chan_list_lock);
8060 	chan->sport = 0;
8061 	write_unlock(&chan_list_lock);
8062 
8063 	if (hcon->state == BT_CONNECTED) {
8064 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8065 			__clear_chan_timer(chan);
8066 			if (l2cap_chan_check_security(chan, true))
8067 				l2cap_state_change(chan, BT_CONNECTED);
8068 		} else
8069 			l2cap_do_start(chan);
8070 	}
8071 
8072 	err = 0;
8073 
8074 chan_unlock:
8075 	l2cap_chan_unlock(chan);
8076 	mutex_unlock(&conn->chan_lock);
8077 done:
8078 	hci_dev_unlock(hdev);
8079 	hci_dev_put(hdev);
8080 	return err;
8081 }
8082 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8083 
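/* Send an enhanced credit based flow control reconfigure request
 * advertising the channel's current MTU and MPS for its source CID.
 */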
8084 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8085 {
8086 	struct l2cap_conn *conn = chan->conn;
8087 	struct {
8088 		struct l2cap_ecred_reconf_req req;
8089 		__le16 scid;
8090 	} pdu;
8091 
8092 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8093 	pdu.req.mps = cpu_to_le16(chan->mps);
8094 	pdu.scid    = cpu_to_le16(chan->scid);
8095 
8096 	chan->ident = l2cap_get_ident(conn);
8097 
8098 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8099 		       sizeof(pdu), &pdu);
8100 }
8101 
8102 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8103 {
8104 	if (chan->imtu > mtu)
8105 		return -EINVAL;
8106 
8107 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8108 
8109 	chan->imtu = mtu;
8110 
8111 	l2cap_ecred_reconfigure(chan);
8112 
8113 	return 0;
8114 }
8115 
8116 /* ---- L2CAP interface with lower layer (HCI) ---- */
8117 
8118 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8119 {
8120 	int exact = 0, lm1 = 0, lm2 = 0;
8121 	struct l2cap_chan *c;
8122 
8123 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8124 
8125 	/* Find listening sockets and check their link_mode */
8126 	read_lock(&chan_list_lock);
8127 	list_for_each_entry(c, &chan_list, global_l) {
8128 		if (c->state != BT_LISTEN)
8129 			continue;
8130 
8131 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8132 			lm1 |= HCI_LM_ACCEPT;
8133 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8134 				lm1 |= HCI_LM_MASTER;
8135 			exact++;
8136 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8137 			lm2 |= HCI_LM_ACCEPT;
8138 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8139 				lm2 |= HCI_LM_MASTER;
8140 		}
8141 	}
8142 	read_unlock(&chan_list_lock);
8143 
8144 	return exact ? lm1 : lm2;
8145 }
8146 
8147 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8148  * from an existing channel in the list or from the beginning of the
8149  * global list (by passing NULL as first parameter).
8150  */
8151 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8152 						  struct hci_conn *hcon)
8153 {
8154 	u8 src_type = bdaddr_src_type(hcon);
8155 
8156 	read_lock(&chan_list_lock);
8157 
8158 	if (c)
8159 		c = list_next_entry(c, global_l);
8160 	else
8161 		c = list_entry(chan_list.next, typeof(*c), global_l);
8162 
8163 	list_for_each_entry_from(c, &chan_list, global_l) {
8164 		if (c->chan_type != L2CAP_CHAN_FIXED)
8165 			continue;
8166 		if (c->state != BT_LISTEN)
8167 			continue;
8168 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8169 			continue;
8170 		if (src_type != c->src_type)
8171 			continue;
8172 
8173 		c = l2cap_chan_hold_unless_zero(c);
8174 		read_unlock(&chan_list_lock);
8175 		return c;
8176 	}
8177 
8178 	read_unlock(&chan_list_lock);
8179 
8180 	return NULL;
8181 }
8182 
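/* HCI callback invoked once an ACL or LE link is established.  On
 * success the L2CAP connection is set up and every listening fixed
 * channel that matches the link gets a new channel instance attached;
 * on failure the connection is deleted.
 */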
8183 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8184 {
8185 	struct hci_dev *hdev = hcon->hdev;
8186 	struct l2cap_conn *conn;
8187 	struct l2cap_chan *pchan;
8188 	u8 dst_type;
8189 
8190 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8191 		return;
8192 
8193 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8194 
8195 	if (status) {
8196 		l2cap_conn_del(hcon, bt_to_errno(status));
8197 		return;
8198 	}
8199 
8200 	conn = l2cap_conn_add(hcon);
8201 	if (!conn)
8202 		return;
8203 
8204 	dst_type = bdaddr_dst_type(hcon);
8205 
8206 	/* If device is blocked, do not create channels for it */
8207 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8208 		return;
8209 
8210 	/* Find fixed channels and notify them of the new connection. We
8211 	 * use multiple individual lookups, continuing each time where
8212 	 * we left off, because the list lock would prevent calling the
8213 	 * potentially sleeping l2cap_chan_lock() function.
8214 	 */
8215 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8216 	while (pchan) {
8217 		struct l2cap_chan *chan, *next;
8218 
8219 		/* Client fixed channels should override server ones */
8220 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8221 			goto next;
8222 
8223 		l2cap_chan_lock(pchan);
8224 		chan = pchan->ops->new_connection(pchan);
8225 		if (chan) {
8226 			bacpy(&chan->src, &hcon->src);
8227 			bacpy(&chan->dst, &hcon->dst);
8228 			chan->src_type = bdaddr_src_type(hcon);
8229 			chan->dst_type = dst_type;
8230 
8231 			__l2cap_chan_add(conn, chan);
8232 		}
8233 
8234 		l2cap_chan_unlock(pchan);
8235 next:
8236 		next = l2cap_global_fixed_chan(pchan, hcon);
8237 		l2cap_chan_put(pchan);
8238 		pchan = next;
8239 	}
8240 
8241 	l2cap_conn_ready(conn);
8242 }
8243 
8244 int l2cap_disconn_ind(struct hci_conn *hcon)
8245 {
8246 	struct l2cap_conn *conn = hcon->l2cap_data;
8247 
8248 	BT_DBG("hcon %p", hcon);
8249 
8250 	if (!conn)
8251 		return HCI_ERROR_REMOTE_USER_TERM;
8252 	return conn->disc_reason;
8253 }
8254 
8255 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8256 {
8257 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8258 		return;
8259 
8260 	BT_DBG("hcon %p reason %d", hcon, reason);
8261 
8262 	l2cap_conn_del(hcon, bt_to_errno(reason));
8263 }
8264 
8265 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8266 {
8267 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8268 		return;
8269 
8270 	if (encrypt == 0x00) {
8271 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8272 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8273 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8274 			   chan->sec_level == BT_SECURITY_FIPS)
8275 			l2cap_chan_close(chan, ECONNREFUSED);
8276 	} else {
8277 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8278 			__clear_chan_timer(chan);
8279 	}
8280 }
8281 
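/* HCI callback for authentication/encryption results.  Depending on the
 * outcome and the channel state this resumes suspended channels,
 * continues a pending connection attempt, or answers a connect request
 * that was held back waiting for security, closing channels whose
 * security requirements can no longer be met.
 */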
8282 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8283 {
8284 	struct l2cap_conn *conn = hcon->l2cap_data;
8285 	struct l2cap_chan *chan;
8286 
8287 	if (!conn)
8288 		return;
8289 
8290 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8291 
8292 	mutex_lock(&conn->chan_lock);
8293 
8294 	list_for_each_entry(chan, &conn->chan_l, list) {
8295 		l2cap_chan_lock(chan);
8296 
8297 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8298 		       state_to_string(chan->state));
8299 
8300 		if (chan->scid == L2CAP_CID_A2MP) {
8301 			l2cap_chan_unlock(chan);
8302 			continue;
8303 		}
8304 
8305 		if (!status && encrypt)
8306 			chan->sec_level = hcon->sec_level;
8307 
8308 		if (!__l2cap_no_conn_pending(chan)) {
8309 			l2cap_chan_unlock(chan);
8310 			continue;
8311 		}
8312 
8313 		if (!status && (chan->state == BT_CONNECTED ||
8314 				chan->state == BT_CONFIG)) {
8315 			chan->ops->resume(chan);
8316 			l2cap_check_encryption(chan, encrypt);
8317 			l2cap_chan_unlock(chan);
8318 			continue;
8319 		}
8320 
8321 		if (chan->state == BT_CONNECT) {
8322 			if (!status && l2cap_check_enc_key_size(hcon))
8323 				l2cap_start_connection(chan);
8324 			else
8325 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8326 		} else if (chan->state == BT_CONNECT2 &&
8327 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8328 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8329 			struct l2cap_conn_rsp rsp;
8330 			__u16 res, stat;
8331 
8332 			if (!status && l2cap_check_enc_key_size(hcon)) {
8333 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8334 					res = L2CAP_CR_PEND;
8335 					stat = L2CAP_CS_AUTHOR_PEND;
8336 					chan->ops->defer(chan);
8337 				} else {
8338 					l2cap_state_change(chan, BT_CONFIG);
8339 					res = L2CAP_CR_SUCCESS;
8340 					stat = L2CAP_CS_NO_INFO;
8341 				}
8342 			} else {
8343 				l2cap_state_change(chan, BT_DISCONN);
8344 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8345 				res = L2CAP_CR_SEC_BLOCK;
8346 				stat = L2CAP_CS_NO_INFO;
8347 			}
8348 
8349 			rsp.scid   = cpu_to_le16(chan->dcid);
8350 			rsp.dcid   = cpu_to_le16(chan->scid);
8351 			rsp.result = cpu_to_le16(res);
8352 			rsp.status = cpu_to_le16(stat);
8353 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8354 				       sizeof(rsp), &rsp);
8355 
8356 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8357 			    res == L2CAP_CR_SUCCESS) {
8358 				char buf[128];
8359 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8360 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8361 					       L2CAP_CONF_REQ,
8362 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8363 					       buf);
8364 				chan->num_conf_req++;
8365 			}
8366 		}
8367 
8368 		l2cap_chan_unlock(chan);
8369 	}
8370 
8371 	mutex_unlock(&conn->chan_lock);
8372 }
8373 
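/* Receive ACL data from the HCI core.  A start fragment allocates
 * conn->rx_skb sized for the full frame announced in the L2CAP header;
 * continuation fragments are appended until rx_len reaches zero, at
 * which point the complete frame is handed to l2cap_recv_frame().
 */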
8374 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8375 {
8376 	struct l2cap_conn *conn = hcon->l2cap_data;
8377 	struct l2cap_hdr *hdr;
8378 	int len;
8379 
8380 	/* Do not create an l2cap conn for an AMP controller */
8381 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8382 		goto drop;
8383 
8384 	if (!conn)
8385 		conn = l2cap_conn_add(hcon);
8386 
8387 	if (!conn)
8388 		goto drop;
8389 
8390 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8391 
8392 	switch (flags) {
8393 	case ACL_START:
8394 	case ACL_START_NO_FLUSH:
8395 	case ACL_COMPLETE:
8396 		if (conn->rx_len) {
8397 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8398 			kfree_skb(conn->rx_skb);
8399 			conn->rx_skb = NULL;
8400 			conn->rx_len = 0;
8401 			l2cap_conn_unreliable(conn, ECOMM);
8402 		}
8403 
8404 		/* A start fragment always begins with the basic L2CAP header */
8405 		if (skb->len < L2CAP_HDR_SIZE) {
8406 			BT_ERR("Frame is too short (len %d)", skb->len);
8407 			l2cap_conn_unreliable(conn, ECOMM);
8408 			goto drop;
8409 		}
8410 
8411 		hdr = (struct l2cap_hdr *) skb->data;
8412 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8413 
8414 		if (len == skb->len) {
8415 			/* Complete frame received */
8416 			l2cap_recv_frame(conn, skb);
8417 			return;
8418 		}
8419 
8420 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8421 
8422 		if (skb->len > len) {
8423 			BT_ERR("Frame is too long (len %d, expected len %d)",
8424 			       skb->len, len);
8425 			l2cap_conn_unreliable(conn, ECOMM);
8426 			goto drop;
8427 		}
8428 
8429 		/* Allocate skb for the complete frame (with header) */
8430 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8431 		if (!conn->rx_skb)
8432 			goto drop;
8433 
8434 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8435 					  skb->len);
8436 		conn->rx_len = len - skb->len;
8437 		break;
8438 
8439 	case ACL_CONT:
8440 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8441 
8442 		if (!conn->rx_len) {
8443 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8444 			l2cap_conn_unreliable(conn, ECOMM);
8445 			goto drop;
8446 		}
8447 
8448 		if (skb->len > conn->rx_len) {
8449 			BT_ERR("Fragment is too long (len %d, expected %d)",
8450 			       skb->len, conn->rx_len);
8451 			kfree_skb(conn->rx_skb);
8452 			conn->rx_skb = NULL;
8453 			conn->rx_len = 0;
8454 			l2cap_conn_unreliable(conn, ECOMM);
8455 			goto drop;
8456 		}
8457 
8458 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8459 					  skb->len);
8460 		conn->rx_len -= skb->len;
8461 
8462 		if (!conn->rx_len) {
8463 			/* Complete frame received. l2cap_recv_frame
8464 			 * takes ownership of the skb so set the global
8465 			 * rx_skb pointer to NULL first.
8466 			 */
8467 			struct sk_buff *rx_skb = conn->rx_skb;
8468 			conn->rx_skb = NULL;
8469 			l2cap_recv_frame(conn, rx_skb);
8470 		}
8471 		break;
8472 	}
8473 
8474 drop:
8475 	kfree_skb(skb);
8476 }
8477 
8478 static struct hci_cb l2cap_cb = {
8479 	.name		= "L2CAP",
8480 	.connect_cfm	= l2cap_connect_cfm,
8481 	.disconn_cfm	= l2cap_disconn_cfm,
8482 	.security_cfm	= l2cap_security_cfm,
8483 };
8484 
8485 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8486 {
8487 	struct l2cap_chan *c;
8488 
8489 	read_lock(&chan_list_lock);
8490 
8491 	list_for_each_entry(c, &chan_list, global_l) {
8492 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8493 			   &c->src, c->src_type, &c->dst, c->dst_type,
8494 			   c->state, __le16_to_cpu(c->psm),
8495 			   c->scid, c->dcid, c->imtu, c->omtu,
8496 			   c->sec_level, c->mode);
8497 	}
8498 
8499 	read_unlock(&chan_list_lock);
8500 
8501 	return 0;
8502 }
8503 
8504 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8505 
8506 static struct dentry *l2cap_debugfs;
8507 
8508 int __init l2cap_init(void)
8509 {
8510 	int err;
8511 
8512 	err = l2cap_init_sockets();
8513 	if (err < 0)
8514 		return err;
8515 
8516 	hci_register_cb(&l2cap_cb);
8517 
8518 	if (IS_ERR_OR_NULL(bt_debugfs))
8519 		return 0;
8520 
8521 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8522 					    NULL, &l2cap_debugfs_fops);
8523 
8524 	return 0;
8525 }
8526 
8527 void l2cap_exit(void)
8528 {
8529 	debugfs_remove(l2cap_debugfs);
8530 	hci_unregister_cb(&l2cap_cb);
8531 	l2cap_cleanup_sockets();
8532 }
8533 
8534 module_param(disable_ertm, bool, 0644);
8535 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8536 
8537 module_param(enable_ecred, bool, 0644);
8538 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8539