1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45 
46 bool disable_ertm;
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55 
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 				       u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 			   void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 		     struct sk_buff_head *skbs, u8 event);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 	return bdaddr_type(hcon->type, hcon->src_type);
81 }
82 
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 	return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87 
88 /* ---- L2CAP channels ---- */
89 
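/* The __l2cap_get_chan_by_*() helpers below walk conn->chan_l and expect the
 * caller to hold conn->chan_lock. The l2cap_get_chan_by_*() wrappers take
 * that lock themselves and return the matching channel already locked with
 * l2cap_chan_lock().
 */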
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns locked channel. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 						 u16 cid)
118 {
119 	struct l2cap_chan *c;
120 
121 	mutex_lock(&conn->chan_lock);
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c)
124 		l2cap_chan_lock(c);
125 	mutex_unlock(&conn->chan_lock);
126 
127 	return c;
128 }
129 
130 /* Find channel with given DCID.
131  * Returns locked channel.
132  */
133 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 						 u16 cid)
135 {
136 	struct l2cap_chan *c;
137 
138 	mutex_lock(&conn->chan_lock);
139 	c = __l2cap_get_chan_by_dcid(conn, cid);
140 	if (c)
141 		l2cap_chan_lock(c);
142 	mutex_unlock(&conn->chan_lock);
143 
144 	return c;
145 }
146 
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 						    u8 ident)
149 {
150 	struct l2cap_chan *c;
151 
152 	list_for_each_entry(c, &conn->chan_l, list) {
153 		if (c->ident == ident)
154 			return c;
155 	}
156 	return NULL;
157 }
158 
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 						  u8 ident)
161 {
162 	struct l2cap_chan *c;
163 
164 	mutex_lock(&conn->chan_lock);
165 	c = __l2cap_get_chan_by_ident(conn, ident);
166 	if (c)
167 		l2cap_chan_lock(c);
168 	mutex_unlock(&conn->chan_lock);
169 
170 	return c;
171 }
172 
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (c->sport == psm && !bacmp(&c->src, src))
179 			return c;
180 	}
181 	return NULL;
182 }
183 
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
185 {
186 	int err;
187 
188 	write_lock(&chan_list_lock);
189 
190 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
191 		err = -EADDRINUSE;
192 		goto done;
193 	}
194 
195 	if (psm) {
196 		chan->psm = psm;
197 		chan->sport = psm;
198 		err = 0;
199 	} else {
200 		u16 p;
201 
202 		err = -EINVAL;
203 		for (p = 0x1001; p < 0x1100; p += 2)
204 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
205 				chan->psm   = cpu_to_le16(p);
206 				chan->sport = cpu_to_le16(p);
207 				err = 0;
208 				break;
209 			}
210 	}
211 
212 done:
213 	write_unlock(&chan_list_lock);
214 	return err;
215 }
216 EXPORT_SYMBOL_GPL(l2cap_add_psm);
217 
218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
219 {
220 	write_lock(&chan_list_lock);
221 
222 	/* Override the defaults (which are for conn-oriented) */
223 	chan->omtu = L2CAP_DEFAULT_MTU;
224 	chan->chan_type = L2CAP_CHAN_FIXED;
225 
226 	chan->scid = scid;
227 
228 	write_unlock(&chan_list_lock);
229 
230 	return 0;
231 }
232 
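/* Pick the first unused source CID in the dynamic range for this connection.
 * LE and BR/EDR links use different upper bounds for that range. Returns 0
 * when the whole dynamic range is in use.
 */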
233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
234 {
235 	u16 cid, dyn_end;
236 
237 	if (conn->hcon->type == LE_LINK)
238 		dyn_end = L2CAP_CID_LE_DYN_END;
239 	else
240 		dyn_end = L2CAP_CID_DYN_END;
241 
242 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
243 		if (!__l2cap_get_chan_by_scid(conn, cid))
244 			return cid;
245 	}
246 
247 	return 0;
248 }
249 
250 static void l2cap_state_change(struct l2cap_chan *chan, int state)
251 {
252 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
253 	       state_to_string(state));
254 
255 	chan->state = state;
256 	chan->ops->state_change(chan, state, 0);
257 }
258 
259 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
260 						int state, int err)
261 {
262 	chan->state = state;
263 	chan->ops->state_change(chan, chan->state, err);
264 }
265 
266 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
267 {
268 	chan->ops->state_change(chan, chan->state, err);
269 }
270 
271 static void __set_retrans_timer(struct l2cap_chan *chan)
272 {
273 	if (!delayed_work_pending(&chan->monitor_timer) &&
274 	    chan->retrans_timeout) {
275 		l2cap_set_timer(chan, &chan->retrans_timer,
276 				msecs_to_jiffies(chan->retrans_timeout));
277 	}
278 }
279 
280 static void __set_monitor_timer(struct l2cap_chan *chan)
281 {
282 	__clear_retrans_timer(chan);
283 	if (chan->monitor_timeout) {
284 		l2cap_set_timer(chan, &chan->monitor_timer,
285 				msecs_to_jiffies(chan->monitor_timeout));
286 	}
287 }
288 
289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
290 					       u16 seq)
291 {
292 	struct sk_buff *skb;
293 
294 	skb_queue_walk(head, skb) {
295 		if (bt_cb(skb)->l2cap.txseq == seq)
296 			return skb;
297 	}
298 
299 	return NULL;
300 }
301 
302 /* ---- L2CAP sequence number lists ---- */
303 
304 /* For ERTM, ordered lists of sequence numbers must be tracked for
305  * SREJ requests that are received and for frames that are to be
306  * retransmitted. These seq_list functions implement a singly-linked
307  * list in an array, where membership in the list can also be checked
308  * in constant time. Items can also be added to the tail of the list
309  * and removed from the head in constant time, without further memory
310  * allocs or frees.
311  */
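/* As a worked example of the helpers below (assuming a mask of 0x0f):
 * appending 5 and then 9 leaves head = 5, list[5] = 9,
 * list[9] = L2CAP_SEQ_LIST_TAIL and tail = 9; a subsequent pop returns 5
 * and advances head to 9.
 */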
312 
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
314 {
315 	size_t alloc_size, i;
316 
317 	/* Allocated size is a power of 2 to map sequence numbers
318 	 * (which may be up to 14 bits) into a smaller array that is
319 	 * sized for the negotiated ERTM transmit windows.
320 	 */
321 	alloc_size = roundup_pow_of_two(size);
322 
323 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 	if (!seq_list->list)
325 		return -ENOMEM;
326 
327 	seq_list->mask = alloc_size - 1;
328 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 	for (i = 0; i < alloc_size; i++)
331 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
332 
333 	return 0;
334 }
335 
336 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
337 {
338 	kfree(seq_list->list);
339 }
340 
341 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
342 					   u16 seq)
343 {
344 	/* Constant-time check for list membership */
345 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
346 }
347 
348 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
349 {
350 	u16 seq = seq_list->head;
351 	u16 mask = seq_list->mask;
352 
353 	seq_list->head = seq_list->list[seq & mask];
354 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
355 
356 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
357 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
359 	}
360 
361 	return seq;
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
407 	 * this work. No need to call l2cap_chan_hold(chan) here again.
408 	 */
409 	l2cap_chan_lock(chan);
410 
411 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
412 		reason = ECONNREFUSED;
413 	else if (chan->state == BT_CONNECT &&
414 		 chan->sec_level != BT_SECURITY_SDP)
415 		reason = ECONNREFUSED;
416 	else
417 		reason = ETIMEDOUT;
418 
419 	l2cap_chan_close(chan, reason);
420 
421 	chan->ops->close(chan);
422 
423 	l2cap_chan_unlock(chan);
424 	l2cap_chan_put(chan);
425 
426 	mutex_unlock(&conn->chan_lock);
427 }
428 
429 struct l2cap_chan *l2cap_chan_create(void)
430 {
431 	struct l2cap_chan *chan;
432 
433 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 	if (!chan)
435 		return NULL;
436 
437 	skb_queue_head_init(&chan->tx_q);
438 	skb_queue_head_init(&chan->srej_q);
439 	mutex_init(&chan->lock);
440 
441 	/* Set default lock nesting level */
442 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
443 
444 	write_lock(&chan_list_lock);
445 	list_add(&chan->global_l, &chan_list);
446 	write_unlock(&chan_list_lock);
447 
448 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
449 
450 	chan->state = BT_OPEN;
451 
452 	kref_init(&chan->kref);
453 
454 	/* This flag is cleared in l2cap_chan_ready() */
455 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
456 
457 	BT_DBG("chan %p", chan);
458 
459 	return chan;
460 }
461 EXPORT_SYMBOL_GPL(l2cap_chan_create);
462 
463 static void l2cap_chan_destroy(struct kref *kref)
464 {
465 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
466 
467 	BT_DBG("chan %p", chan);
468 
469 	write_lock(&chan_list_lock);
470 	list_del(&chan->global_l);
471 	write_unlock(&chan_list_lock);
472 
473 	kfree(chan);
474 }
475 
476 void l2cap_chan_hold(struct l2cap_chan *c)
477 {
478 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
479 
480 	kref_get(&c->kref);
481 }
482 
483 void l2cap_chan_put(struct l2cap_chan *c)
484 {
485 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
486 
487 	kref_put(&c->kref, l2cap_chan_destroy);
488 }
489 EXPORT_SYMBOL_GPL(l2cap_chan_put);
490 
491 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
492 {
493 	chan->fcs  = L2CAP_FCS_CRC16;
494 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
495 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
496 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
497 	chan->remote_max_tx = chan->max_tx;
498 	chan->remote_tx_win = chan->tx_win;
499 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
500 	chan->sec_level = BT_SECURITY_LOW;
501 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
502 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
503 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
504 
505 	chan->conf_state = 0;
506 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
507 
508 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
509 }
510 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
511 
512 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
513 {
514 	chan->sdu = NULL;
515 	chan->sdu_last_frag = NULL;
516 	chan->sdu_len = 0;
517 	chan->tx_credits = 0;
518 	chan->rx_credits = le_max_credits;
519 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
520 
521 	skb_queue_head_init(&chan->tx_q);
522 }
523 
524 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
525 {
526 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
527 	       __le16_to_cpu(chan->psm), chan->dcid);
528 
529 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
530 
531 	chan->conn = conn;
532 
533 	switch (chan->chan_type) {
534 	case L2CAP_CHAN_CONN_ORIENTED:
535 		/* Alloc CID for connection-oriented socket */
536 		chan->scid = l2cap_alloc_cid(conn);
537 		if (conn->hcon->type == ACL_LINK)
538 			chan->omtu = L2CAP_DEFAULT_MTU;
539 		break;
540 
541 	case L2CAP_CHAN_CONN_LESS:
542 		/* Connectionless socket */
543 		chan->scid = L2CAP_CID_CONN_LESS;
544 		chan->dcid = L2CAP_CID_CONN_LESS;
545 		chan->omtu = L2CAP_DEFAULT_MTU;
546 		break;
547 
548 	case L2CAP_CHAN_FIXED:
549 		/* Caller will set CID and CID specific MTU values */
550 		break;
551 
552 	default:
553 		/* Raw socket can send/recv signalling messages only */
554 		chan->scid = L2CAP_CID_SIGNALING;
555 		chan->dcid = L2CAP_CID_SIGNALING;
556 		chan->omtu = L2CAP_DEFAULT_MTU;
557 	}
558 
559 	chan->local_id		= L2CAP_BESTEFFORT_ID;
560 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
561 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
562 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
563 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
564 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
565 
566 	l2cap_chan_hold(chan);
567 
568 	/* Only keep a reference for fixed channels if they requested it */
569 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
570 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
571 		hci_conn_hold(conn->hcon);
572 
573 	list_add(&chan->list, &conn->chan_l);
574 }
575 
576 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
577 {
578 	mutex_lock(&conn->chan_lock);
579 	__l2cap_chan_add(conn, chan);
580 	mutex_unlock(&conn->chan_lock);
581 }
582 
583 void l2cap_chan_del(struct l2cap_chan *chan, int err)
584 {
585 	struct l2cap_conn *conn = chan->conn;
586 
587 	__clear_chan_timer(chan);
588 
589 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
590 	       state_to_string(chan->state));
591 
592 	chan->ops->teardown(chan, err);
593 
594 	if (conn) {
595 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
596 		/* Delete from channel list */
597 		list_del(&chan->list);
598 
599 		l2cap_chan_put(chan);
600 
601 		chan->conn = NULL;
602 
603 		/* Reference was only held for non-fixed channels or
604 		 * fixed channels that explicitly requested it using the
605 		 * FLAG_HOLD_HCI_CONN flag.
606 		 */
607 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
608 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
609 			hci_conn_drop(conn->hcon);
610 
611 		if (mgr && mgr->bredr_chan == chan)
612 			mgr->bredr_chan = NULL;
613 	}
614 
615 	if (chan->hs_hchan) {
616 		struct hci_chan *hs_hchan = chan->hs_hchan;
617 
618 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
619 		amp_disconnect_logical_link(hs_hchan);
620 	}
621 
622 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
623 		return;
624 
625 	switch (chan->mode) {
626 	case L2CAP_MODE_BASIC:
627 		break;
628 
629 	case L2CAP_MODE_LE_FLOWCTL:
630 		skb_queue_purge(&chan->tx_q);
631 		break;
632 
633 	case L2CAP_MODE_ERTM:
634 		__clear_retrans_timer(chan);
635 		__clear_monitor_timer(chan);
636 		__clear_ack_timer(chan);
637 
638 		skb_queue_purge(&chan->srej_q);
639 
640 		l2cap_seq_list_free(&chan->srej_list);
641 		l2cap_seq_list_free(&chan->retrans_list);
642 
643 		/* fall through */
644 
645 	case L2CAP_MODE_STREAMING:
646 		skb_queue_purge(&chan->tx_q);
647 		break;
648 	}
649 
650 	return;
651 }
652 EXPORT_SYMBOL_GPL(l2cap_chan_del);
653 
654 static void l2cap_conn_update_id_addr(struct work_struct *work)
655 {
656 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
657 					       id_addr_update_work);
658 	struct hci_conn *hcon = conn->hcon;
659 	struct l2cap_chan *chan;
660 
661 	mutex_lock(&conn->chan_lock);
662 
663 	list_for_each_entry(chan, &conn->chan_l, list) {
664 		l2cap_chan_lock(chan);
665 		bacpy(&chan->dst, &hcon->dst);
666 		chan->dst_type = bdaddr_dst_type(hcon);
667 		l2cap_chan_unlock(chan);
668 	}
669 
670 	mutex_unlock(&conn->chan_lock);
671 }
672 
673 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
674 {
675 	struct l2cap_conn *conn = chan->conn;
676 	struct l2cap_le_conn_rsp rsp;
677 	u16 result;
678 
679 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
680 		result = L2CAP_CR_AUTHORIZATION;
681 	else
682 		result = L2CAP_CR_BAD_PSM;
683 
684 	l2cap_state_change(chan, BT_DISCONN);
685 
686 	rsp.dcid    = cpu_to_le16(chan->scid);
687 	rsp.mtu     = cpu_to_le16(chan->imtu);
688 	rsp.mps     = cpu_to_le16(chan->mps);
689 	rsp.credits = cpu_to_le16(chan->rx_credits);
690 	rsp.result  = cpu_to_le16(result);
691 
692 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
693 		       &rsp);
694 }
695 
696 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
697 {
698 	struct l2cap_conn *conn = chan->conn;
699 	struct l2cap_conn_rsp rsp;
700 	u16 result;
701 
702 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
703 		result = L2CAP_CR_SEC_BLOCK;
704 	else
705 		result = L2CAP_CR_BAD_PSM;
706 
707 	l2cap_state_change(chan, BT_DISCONN);
708 
709 	rsp.scid   = cpu_to_le16(chan->dcid);
710 	rsp.dcid   = cpu_to_le16(chan->scid);
711 	rsp.result = cpu_to_le16(result);
712 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
713 
714 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
715 }
716 
717 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
718 {
719 	struct l2cap_conn *conn = chan->conn;
720 
721 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
722 
723 	switch (chan->state) {
724 	case BT_LISTEN:
725 		chan->ops->teardown(chan, 0);
726 		break;
727 
728 	case BT_CONNECTED:
729 	case BT_CONFIG:
730 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
731 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
732 			l2cap_send_disconn_req(chan, reason);
733 		} else
734 			l2cap_chan_del(chan, reason);
735 		break;
736 
737 	case BT_CONNECT2:
738 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
739 			if (conn->hcon->type == ACL_LINK)
740 				l2cap_chan_connect_reject(chan);
741 			else if (conn->hcon->type == LE_LINK)
742 				l2cap_chan_le_connect_reject(chan);
743 		}
744 
745 		l2cap_chan_del(chan, reason);
746 		break;
747 
748 	case BT_CONNECT:
749 	case BT_DISCONN:
750 		l2cap_chan_del(chan, reason);
751 		break;
752 
753 	default:
754 		chan->ops->teardown(chan, 0);
755 		break;
756 	}
757 }
758 EXPORT_SYMBOL(l2cap_chan_close);
759 
760 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
761 {
762 	switch (chan->chan_type) {
763 	case L2CAP_CHAN_RAW:
764 		switch (chan->sec_level) {
765 		case BT_SECURITY_HIGH:
766 		case BT_SECURITY_FIPS:
767 			return HCI_AT_DEDICATED_BONDING_MITM;
768 		case BT_SECURITY_MEDIUM:
769 			return HCI_AT_DEDICATED_BONDING;
770 		default:
771 			return HCI_AT_NO_BONDING;
772 		}
773 		break;
774 	case L2CAP_CHAN_CONN_LESS:
775 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
776 			if (chan->sec_level == BT_SECURITY_LOW)
777 				chan->sec_level = BT_SECURITY_SDP;
778 		}
779 		if (chan->sec_level == BT_SECURITY_HIGH ||
780 		    chan->sec_level == BT_SECURITY_FIPS)
781 			return HCI_AT_NO_BONDING_MITM;
782 		else
783 			return HCI_AT_NO_BONDING;
784 		break;
785 	case L2CAP_CHAN_CONN_ORIENTED:
786 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
787 			if (chan->sec_level == BT_SECURITY_LOW)
788 				chan->sec_level = BT_SECURITY_SDP;
789 
790 			if (chan->sec_level == BT_SECURITY_HIGH ||
791 			    chan->sec_level == BT_SECURITY_FIPS)
792 				return HCI_AT_NO_BONDING_MITM;
793 			else
794 				return HCI_AT_NO_BONDING;
795 		}
796 		/* fall through */
797 	default:
798 		switch (chan->sec_level) {
799 		case BT_SECURITY_HIGH:
800 		case BT_SECURITY_FIPS:
801 			return HCI_AT_GENERAL_BONDING_MITM;
802 		case BT_SECURITY_MEDIUM:
803 			return HCI_AT_GENERAL_BONDING;
804 		default:
805 			return HCI_AT_NO_BONDING;
806 		}
807 		break;
808 	}
809 }
810 
811 /* Service level security */
812 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
813 {
814 	struct l2cap_conn *conn = chan->conn;
815 	__u8 auth_type;
816 
817 	if (conn->hcon->type == LE_LINK)
818 		return smp_conn_security(conn->hcon, chan->sec_level);
819 
820 	auth_type = l2cap_get_auth_type(chan);
821 
822 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
823 				 initiator);
824 }
825 
826 static u8 l2cap_get_ident(struct l2cap_conn *conn)
827 {
828 	u8 id;
829 
830 	/* Get next available identifier.
831 	 *    1 - 128 are used by kernel.
832 	 *  129 - 199 are reserved.
833 	 *  200 - 254 are used by utilities like l2ping, etc.
834 	 */
835 
836 	mutex_lock(&conn->ident_lock);
837 
838 	if (++conn->tx_ident > 128)
839 		conn->tx_ident = 1;
840 
841 	id = conn->tx_ident;
842 
843 	mutex_unlock(&conn->ident_lock);
844 
845 	return id;
846 }
847 
848 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
849 			   void *data)
850 {
851 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
852 	u8 flags;
853 
854 	BT_DBG("code 0x%2.2x", code);
855 
856 	if (!skb)
857 		return;
858 
859 	/* Use NO_FLUSH if supported or if we have an LE link (which does
860 	 * not support auto-flushing packets) */
861 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
862 	    conn->hcon->type == LE_LINK)
863 		flags = ACL_START_NO_FLUSH;
864 	else
865 		flags = ACL_START;
866 
867 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
868 	skb->priority = HCI_PRIO_MAX;
869 
870 	hci_send_acl(conn->hchan, skb, flags);
871 }
872 
873 static bool __chan_is_moving(struct l2cap_chan *chan)
874 {
875 	return chan->move_state != L2CAP_MOVE_STABLE &&
876 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
877 }
878 
879 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
880 {
881 	struct hci_conn *hcon = chan->conn->hcon;
882 	u16 flags;
883 
884 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
885 	       skb->priority);
886 
887 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
888 		if (chan->hs_hchan)
889 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
890 		else
891 			kfree_skb(skb);
892 
893 		return;
894 	}
895 
896 	/* Use NO_FLUSH for LE links (where this is the only option) or
897 	 * if the BR/EDR link supports it and flushing has not been
898 	 * explicitly requested (through FLAG_FLUSHABLE).
899 	 */
900 	if (hcon->type == LE_LINK ||
901 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
902 	     lmp_no_flush_capable(hcon->hdev)))
903 		flags = ACL_START_NO_FLUSH;
904 	else
905 		flags = ACL_START;
906 
907 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
908 	hci_send_acl(chan->conn->hchan, skb, flags);
909 }
910 
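/* The pack/unpack helpers below translate between the on-air ERTM control
 * field and the parsed struct l2cap_ctrl. Channels with FLAG_EXT_CTRL set
 * use the 32-bit extended control field; all others use the 16-bit enhanced
 * control field.
 */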
911 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
912 {
913 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
914 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
915 
916 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
917 		/* S-Frame */
918 		control->sframe = 1;
919 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
920 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
921 
922 		control->sar = 0;
923 		control->txseq = 0;
924 	} else {
925 		/* I-Frame */
926 		control->sframe = 0;
927 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
928 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
929 
930 		control->poll = 0;
931 		control->super = 0;
932 	}
933 }
934 
935 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
936 {
937 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
938 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
939 
940 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
941 		/* S-Frame */
942 		control->sframe = 1;
943 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
944 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
945 
946 		control->sar = 0;
947 		control->txseq = 0;
948 	} else {
949 		/* I-Frame */
950 		control->sframe = 0;
951 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
952 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
953 
954 		control->poll = 0;
955 		control->super = 0;
956 	}
957 }
958 
959 static inline void __unpack_control(struct l2cap_chan *chan,
960 				    struct sk_buff *skb)
961 {
962 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
963 		__unpack_extended_control(get_unaligned_le32(skb->data),
964 					  &bt_cb(skb)->l2cap);
965 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
966 	} else {
967 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
968 					  &bt_cb(skb)->l2cap);
969 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
970 	}
971 }
972 
973 static u32 __pack_extended_control(struct l2cap_ctrl *control)
974 {
975 	u32 packed;
976 
977 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
978 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
979 
980 	if (control->sframe) {
981 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
982 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
983 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
984 	} else {
985 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
986 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
987 	}
988 
989 	return packed;
990 }
991 
992 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
993 {
994 	u16 packed;
995 
996 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
997 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
998 
999 	if (control->sframe) {
1000 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1001 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1002 		packed |= L2CAP_CTRL_FRAME_TYPE;
1003 	} else {
1004 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1005 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1006 	}
1007 
1008 	return packed;
1009 }
1010 
1011 static inline void __pack_control(struct l2cap_chan *chan,
1012 				  struct l2cap_ctrl *control,
1013 				  struct sk_buff *skb)
1014 {
1015 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1016 		put_unaligned_le32(__pack_extended_control(control),
1017 				   skb->data + L2CAP_HDR_SIZE);
1018 	} else {
1019 		put_unaligned_le16(__pack_enhanced_control(control),
1020 				   skb->data + L2CAP_HDR_SIZE);
1021 	}
1022 }
1023 
1024 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1025 {
1026 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1027 		return L2CAP_EXT_HDR_SIZE;
1028 	else
1029 		return L2CAP_ENH_HDR_SIZE;
1030 }
1031 
1032 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1033 					       u32 control)
1034 {
1035 	struct sk_buff *skb;
1036 	struct l2cap_hdr *lh;
1037 	int hlen = __ertm_hdr_size(chan);
1038 
1039 	if (chan->fcs == L2CAP_FCS_CRC16)
1040 		hlen += L2CAP_FCS_SIZE;
1041 
1042 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1043 
1044 	if (!skb)
1045 		return ERR_PTR(-ENOMEM);
1046 
1047 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1048 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1049 	lh->cid = cpu_to_le16(chan->dcid);
1050 
1051 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1052 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1053 	else
1054 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1055 
1056 	if (chan->fcs == L2CAP_FCS_CRC16) {
1057 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1058 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1059 	}
1060 
1061 	skb->priority = HCI_PRIO_MAX;
1062 	return skb;
1063 }
1064 
1065 static void l2cap_send_sframe(struct l2cap_chan *chan,
1066 			      struct l2cap_ctrl *control)
1067 {
1068 	struct sk_buff *skb;
1069 	u32 control_field;
1070 
1071 	BT_DBG("chan %p, control %p", chan, control);
1072 
1073 	if (!control->sframe)
1074 		return;
1075 
1076 	if (__chan_is_moving(chan))
1077 		return;
1078 
1079 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1080 	    !control->poll)
1081 		control->final = 1;
1082 
1083 	if (control->super == L2CAP_SUPER_RR)
1084 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1085 	else if (control->super == L2CAP_SUPER_RNR)
1086 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1087 
1088 	if (control->super != L2CAP_SUPER_SREJ) {
1089 		chan->last_acked_seq = control->reqseq;
1090 		__clear_ack_timer(chan);
1091 	}
1092 
1093 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1094 	       control->final, control->poll, control->super);
1095 
1096 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1097 		control_field = __pack_extended_control(control);
1098 	else
1099 		control_field = __pack_enhanced_control(control);
1100 
1101 	skb = l2cap_create_sframe_pdu(chan, control_field);
1102 	if (!IS_ERR(skb))
1103 		l2cap_do_send(chan, skb);
1104 }
1105 
1106 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1107 {
1108 	struct l2cap_ctrl control;
1109 
1110 	BT_DBG("chan %p, poll %d", chan, poll);
1111 
1112 	memset(&control, 0, sizeof(control));
1113 	control.sframe = 1;
1114 	control.poll = poll;
1115 
1116 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1117 		control.super = L2CAP_SUPER_RNR;
1118 	else
1119 		control.super = L2CAP_SUPER_RR;
1120 
1121 	control.reqseq = chan->buffer_seq;
1122 	l2cap_send_sframe(chan, &control);
1123 }
1124 
1125 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1126 {
1127 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1128 		return true;
1129 
1130 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1131 }
1132 
1133 static bool __amp_capable(struct l2cap_chan *chan)
1134 {
1135 	struct l2cap_conn *conn = chan->conn;
1136 	struct hci_dev *hdev;
1137 	bool amp_available = false;
1138 
1139 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1140 		return false;
1141 
1142 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1143 		return false;
1144 
1145 	read_lock(&hci_dev_list_lock);
1146 	list_for_each_entry(hdev, &hci_dev_list, list) {
1147 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1148 		    test_bit(HCI_UP, &hdev->flags)) {
1149 			amp_available = true;
1150 			break;
1151 		}
1152 	}
1153 	read_unlock(&hci_dev_list_lock);
1154 
1155 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1156 		return amp_available;
1157 
1158 	return false;
1159 }
1160 
1161 static bool l2cap_check_efs(struct l2cap_chan *chan)
1162 {
1163 	/* Check EFS parameters */
1164 	return true;
1165 }
1166 
1167 void l2cap_send_conn_req(struct l2cap_chan *chan)
1168 {
1169 	struct l2cap_conn *conn = chan->conn;
1170 	struct l2cap_conn_req req;
1171 
1172 	req.scid = cpu_to_le16(chan->scid);
1173 	req.psm  = chan->psm;
1174 
1175 	chan->ident = l2cap_get_ident(conn);
1176 
1177 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1178 
1179 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1180 }
1181 
1182 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1183 {
1184 	struct l2cap_create_chan_req req;
1185 	req.scid = cpu_to_le16(chan->scid);
1186 	req.psm  = chan->psm;
1187 	req.amp_id = amp_id;
1188 
1189 	chan->ident = l2cap_get_ident(chan->conn);
1190 
1191 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1192 		       sizeof(req), &req);
1193 }
1194 
1195 static void l2cap_move_setup(struct l2cap_chan *chan)
1196 {
1197 	struct sk_buff *skb;
1198 
1199 	BT_DBG("chan %p", chan);
1200 
1201 	if (chan->mode != L2CAP_MODE_ERTM)
1202 		return;
1203 
1204 	__clear_retrans_timer(chan);
1205 	__clear_monitor_timer(chan);
1206 	__clear_ack_timer(chan);
1207 
1208 	chan->retry_count = 0;
1209 	skb_queue_walk(&chan->tx_q, skb) {
1210 		if (bt_cb(skb)->l2cap.retries)
1211 			bt_cb(skb)->l2cap.retries = 1;
1212 		else
1213 			break;
1214 	}
1215 
1216 	chan->expected_tx_seq = chan->buffer_seq;
1217 
1218 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1219 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1220 	l2cap_seq_list_clear(&chan->retrans_list);
1221 	l2cap_seq_list_clear(&chan->srej_list);
1222 	skb_queue_purge(&chan->srej_q);
1223 
1224 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1225 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1226 
1227 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1228 }
1229 
1230 static void l2cap_move_done(struct l2cap_chan *chan)
1231 {
1232 	u8 move_role = chan->move_role;
1233 	BT_DBG("chan %p", chan);
1234 
1235 	chan->move_state = L2CAP_MOVE_STABLE;
1236 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1237 
1238 	if (chan->mode != L2CAP_MODE_ERTM)
1239 		return;
1240 
1241 	switch (move_role) {
1242 	case L2CAP_MOVE_ROLE_INITIATOR:
1243 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1244 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1245 		break;
1246 	case L2CAP_MOVE_ROLE_RESPONDER:
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1248 		break;
1249 	}
1250 }
1251 
1252 static void l2cap_chan_ready(struct l2cap_chan *chan)
1253 {
1254 	/* The channel may have already been flagged as connected in
1255 	 * case of receiving data before the L2CAP info req/rsp
1256 	 * procedure is complete.
1257 	 */
1258 	if (chan->state == BT_CONNECTED)
1259 		return;
1260 
1261 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1262 	chan->conf_state = 0;
1263 	__clear_chan_timer(chan);
1264 
1265 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1266 		chan->ops->suspend(chan);
1267 
1268 	chan->state = BT_CONNECTED;
1269 
1270 	chan->ops->ready(chan);
1271 }
1272 
1273 static void l2cap_le_connect(struct l2cap_chan *chan)
1274 {
1275 	struct l2cap_conn *conn = chan->conn;
1276 	struct l2cap_le_conn_req req;
1277 
1278 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1279 		return;
1280 
1281 	req.psm     = chan->psm;
1282 	req.scid    = cpu_to_le16(chan->scid);
1283 	req.mtu     = cpu_to_le16(chan->imtu);
1284 	req.mps     = cpu_to_le16(chan->mps);
1285 	req.credits = cpu_to_le16(chan->rx_credits);
1286 
1287 	chan->ident = l2cap_get_ident(conn);
1288 
1289 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1290 		       sizeof(req), &req);
1291 }
1292 
1293 static void l2cap_le_start(struct l2cap_chan *chan)
1294 {
1295 	struct l2cap_conn *conn = chan->conn;
1296 
1297 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1298 		return;
1299 
1300 	if (!chan->psm) {
1301 		l2cap_chan_ready(chan);
1302 		return;
1303 	}
1304 
1305 	if (chan->state == BT_CONNECT)
1306 		l2cap_le_connect(chan);
1307 }
1308 
1309 static void l2cap_start_connection(struct l2cap_chan *chan)
1310 {
1311 	if (__amp_capable(chan)) {
1312 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1313 		a2mp_discover_amp(chan);
1314 	} else if (chan->conn->hcon->type == LE_LINK) {
1315 		l2cap_le_start(chan);
1316 	} else {
1317 		l2cap_send_conn_req(chan);
1318 	}
1319 }
1320 
1321 static void l2cap_request_info(struct l2cap_conn *conn)
1322 {
1323 	struct l2cap_info_req req;
1324 
1325 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1326 		return;
1327 
1328 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1329 
1330 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1331 	conn->info_ident = l2cap_get_ident(conn);
1332 
1333 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1334 
1335 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1336 		       sizeof(req), &req);
1337 }
1338 
1339 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1340 {
1341 	/* The minimum encryption key size needs to be enforced by the
1342 	 * host stack before establishing any L2CAP connections. The
1343 	 * specification in theory allows a minimum of 1, but to align
1344 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1345 	 *
1346 	 * This check might also be called for unencrypted connections
1347 	 * that have no key size requirements. Ensure that the link is
1348 	 * actually encrypted before enforcing a key size.
1349 	 */
1350 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1351 		hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1352 }
1353 
1354 static void l2cap_do_start(struct l2cap_chan *chan)
1355 {
1356 	struct l2cap_conn *conn = chan->conn;
1357 
1358 	if (conn->hcon->type == LE_LINK) {
1359 		l2cap_le_start(chan);
1360 		return;
1361 	}
1362 
1363 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1364 		l2cap_request_info(conn);
1365 		return;
1366 	}
1367 
1368 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1369 		return;
1370 
1371 	if (!l2cap_chan_check_security(chan, true) ||
1372 	    !__l2cap_no_conn_pending(chan))
1373 		return;
1374 
1375 	if (l2cap_check_enc_key_size(conn->hcon))
1376 		l2cap_start_connection(chan);
1377 	else
1378 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1379 }
1380 
1381 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1382 {
1383 	u32 local_feat_mask = l2cap_feat_mask;
1384 	if (!disable_ertm)
1385 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1386 
1387 	switch (mode) {
1388 	case L2CAP_MODE_ERTM:
1389 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1390 	case L2CAP_MODE_STREAMING:
1391 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1392 	default:
1393 		return 0x00;
1394 	}
1395 }
1396 
1397 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1398 {
1399 	struct l2cap_conn *conn = chan->conn;
1400 	struct l2cap_disconn_req req;
1401 
1402 	if (!conn)
1403 		return;
1404 
1405 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1406 		__clear_retrans_timer(chan);
1407 		__clear_monitor_timer(chan);
1408 		__clear_ack_timer(chan);
1409 	}
1410 
1411 	if (chan->scid == L2CAP_CID_A2MP) {
1412 		l2cap_state_change(chan, BT_DISCONN);
1413 		return;
1414 	}
1415 
1416 	req.dcid = cpu_to_le16(chan->dcid);
1417 	req.scid = cpu_to_le16(chan->scid);
1418 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1419 		       sizeof(req), &req);
1420 
1421 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1422 }
1423 
1424 /* ---- L2CAP connections ---- */
1425 static void l2cap_conn_start(struct l2cap_conn *conn)
1426 {
1427 	struct l2cap_chan *chan, *tmp;
1428 
1429 	BT_DBG("conn %p", conn);
1430 
1431 	mutex_lock(&conn->chan_lock);
1432 
1433 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1434 		l2cap_chan_lock(chan);
1435 
1436 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1437 			l2cap_chan_ready(chan);
1438 			l2cap_chan_unlock(chan);
1439 			continue;
1440 		}
1441 
1442 		if (chan->state == BT_CONNECT) {
1443 			if (!l2cap_chan_check_security(chan, true) ||
1444 			    !__l2cap_no_conn_pending(chan)) {
1445 				l2cap_chan_unlock(chan);
1446 				continue;
1447 			}
1448 
1449 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1450 			    && test_bit(CONF_STATE2_DEVICE,
1451 					&chan->conf_state)) {
1452 				l2cap_chan_close(chan, ECONNRESET);
1453 				l2cap_chan_unlock(chan);
1454 				continue;
1455 			}
1456 
1457 			if (l2cap_check_enc_key_size(conn->hcon))
1458 				l2cap_start_connection(chan);
1459 			else
1460 				l2cap_chan_close(chan, ECONNREFUSED);
1461 
1462 		} else if (chan->state == BT_CONNECT2) {
1463 			struct l2cap_conn_rsp rsp;
1464 			char buf[128];
1465 			rsp.scid = cpu_to_le16(chan->dcid);
1466 			rsp.dcid = cpu_to_le16(chan->scid);
1467 
1468 			if (l2cap_chan_check_security(chan, false)) {
1469 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1470 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1471 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1472 					chan->ops->defer(chan);
1473 
1474 				} else {
1475 					l2cap_state_change(chan, BT_CONFIG);
1476 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1477 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1478 				}
1479 			} else {
1480 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1481 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1482 			}
1483 
1484 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1485 				       sizeof(rsp), &rsp);
1486 
1487 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1488 			    rsp.result != L2CAP_CR_SUCCESS) {
1489 				l2cap_chan_unlock(chan);
1490 				continue;
1491 			}
1492 
1493 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1494 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1495 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1496 			chan->num_conf_req++;
1497 		}
1498 
1499 		l2cap_chan_unlock(chan);
1500 	}
1501 
1502 	mutex_unlock(&conn->chan_lock);
1503 }
1504 
1505 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1506 {
1507 	struct hci_conn *hcon = conn->hcon;
1508 	struct hci_dev *hdev = hcon->hdev;
1509 
1510 	BT_DBG("%s conn %p", hdev->name, conn);
1511 
1512 	/* For outgoing pairing which doesn't necessarily have an
1513 	 * associated socket (e.g. mgmt_pair_device).
1514 	 */
1515 	if (hcon->out)
1516 		smp_conn_security(hcon, hcon->pending_sec_level);
1517 
1518 	/* For LE slave connections, make sure the connection interval
1519 	 * is in the range of the minimum and maximum interval that has
1520 	 * been configured for this connection. If not, then trigger
1521 	 * the connection update procedure.
1522 	 */
1523 	if (hcon->role == HCI_ROLE_SLAVE &&
1524 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1525 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1526 		struct l2cap_conn_param_update_req req;
1527 
1528 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1529 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1530 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1531 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1532 
1533 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1534 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1535 	}
1536 }
1537 
1538 static void l2cap_conn_ready(struct l2cap_conn *conn)
1539 {
1540 	struct l2cap_chan *chan;
1541 	struct hci_conn *hcon = conn->hcon;
1542 
1543 	BT_DBG("conn %p", conn);
1544 
1545 	if (hcon->type == ACL_LINK)
1546 		l2cap_request_info(conn);
1547 
1548 	mutex_lock(&conn->chan_lock);
1549 
1550 	list_for_each_entry(chan, &conn->chan_l, list) {
1551 
1552 		l2cap_chan_lock(chan);
1553 
1554 		if (chan->scid == L2CAP_CID_A2MP) {
1555 			l2cap_chan_unlock(chan);
1556 			continue;
1557 		}
1558 
1559 		if (hcon->type == LE_LINK) {
1560 			l2cap_le_start(chan);
1561 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1562 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1563 				l2cap_chan_ready(chan);
1564 		} else if (chan->state == BT_CONNECT) {
1565 			l2cap_do_start(chan);
1566 		}
1567 
1568 		l2cap_chan_unlock(chan);
1569 	}
1570 
1571 	mutex_unlock(&conn->chan_lock);
1572 
1573 	if (hcon->type == LE_LINK)
1574 		l2cap_le_conn_ready(conn);
1575 
1576 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1577 }
1578 
1579 /* Notify sockets that we cannot guarantee reliability anymore */
1580 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1581 {
1582 	struct l2cap_chan *chan;
1583 
1584 	BT_DBG("conn %p", conn);
1585 
1586 	mutex_lock(&conn->chan_lock);
1587 
1588 	list_for_each_entry(chan, &conn->chan_l, list) {
1589 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1590 			l2cap_chan_set_err(chan, err);
1591 	}
1592 
1593 	mutex_unlock(&conn->chan_lock);
1594 }
1595 
1596 static void l2cap_info_timeout(struct work_struct *work)
1597 {
1598 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1599 					       info_timer.work);
1600 
1601 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1602 	conn->info_ident = 0;
1603 
1604 	l2cap_conn_start(conn);
1605 }
1606 
1607 /*
1608  * l2cap_user
1609  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1610  * callback is called during registration. The ->remove callback is called
1611  * during unregistration.
1612  * An l2cap_user object can either be explicitly unregistered or when the
1613  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1614  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1615  * External modules must own a reference to the l2cap_conn object if they intend
1616  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1617  * any time if they don't.
1618  */
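/*
 * A minimal (hypothetical) user might look roughly like the sketch below;
 * the callback names are placeholders and only the l2cap_user members used
 * by this file (list, probe, remove) are assumed:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *
 * The list head must be initialised (empty) before registering, since
 * l2cap_register_user() rejects users whose list entry is not empty.
 */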
1619 
1620 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1621 {
1622 	struct hci_dev *hdev = conn->hcon->hdev;
1623 	int ret;
1624 
1625 	/* We need to check whether l2cap_conn is registered. If it is not, we
1626 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1627 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1628 	 * relies on the parent hci_conn object to be locked. This itself relies
1629 	 * on the hci_dev object to be locked. So we must lock the hci device
1630 	 * here, too. */
1631 
1632 	hci_dev_lock(hdev);
1633 
1634 	if (!list_empty(&user->list)) {
1635 		ret = -EINVAL;
1636 		goto out_unlock;
1637 	}
1638 
1639 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1640 	if (!conn->hchan) {
1641 		ret = -ENODEV;
1642 		goto out_unlock;
1643 	}
1644 
1645 	ret = user->probe(conn, user);
1646 	if (ret)
1647 		goto out_unlock;
1648 
1649 	list_add(&user->list, &conn->users);
1650 	ret = 0;
1651 
1652 out_unlock:
1653 	hci_dev_unlock(hdev);
1654 	return ret;
1655 }
1656 EXPORT_SYMBOL(l2cap_register_user);
1657 
1658 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1659 {
1660 	struct hci_dev *hdev = conn->hcon->hdev;
1661 
1662 	hci_dev_lock(hdev);
1663 
1664 	if (list_empty(&user->list))
1665 		goto out_unlock;
1666 
1667 	list_del_init(&user->list);
1668 	user->remove(conn, user);
1669 
1670 out_unlock:
1671 	hci_dev_unlock(hdev);
1672 }
1673 EXPORT_SYMBOL(l2cap_unregister_user);
1674 
1675 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1676 {
1677 	struct l2cap_user *user;
1678 
1679 	while (!list_empty(&conn->users)) {
1680 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1681 		list_del_init(&user->list);
1682 		user->remove(conn, user);
1683 	}
1684 }
1685 
1686 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1687 {
1688 	struct l2cap_conn *conn = hcon->l2cap_data;
1689 	struct l2cap_chan *chan, *l;
1690 
1691 	if (!conn)
1692 		return;
1693 
1694 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1695 
1696 	kfree_skb(conn->rx_skb);
1697 
1698 	skb_queue_purge(&conn->pending_rx);
1699 
1700 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1701 	 * might block if we are running on a worker from the same workqueue
1702 	 * pending_rx_work is waiting on.
1703 	 */
1704 	if (work_pending(&conn->pending_rx_work))
1705 		cancel_work_sync(&conn->pending_rx_work);
1706 
1707 	if (work_pending(&conn->id_addr_update_work))
1708 		cancel_work_sync(&conn->id_addr_update_work);
1709 
1710 	l2cap_unregister_all_users(conn);
1711 
1712 	/* Force the connection to be immediately dropped */
1713 	hcon->disc_timeout = 0;
1714 
1715 	mutex_lock(&conn->chan_lock);
1716 
1717 	/* Kill channels */
1718 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1719 		l2cap_chan_hold(chan);
1720 		l2cap_chan_lock(chan);
1721 
1722 		l2cap_chan_del(chan, err);
1723 
1724 		chan->ops->close(chan);
1725 
1726 		l2cap_chan_unlock(chan);
1727 		l2cap_chan_put(chan);
1728 	}
1729 
1730 	mutex_unlock(&conn->chan_lock);
1731 
1732 	hci_chan_del(conn->hchan);
1733 
1734 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1735 		cancel_delayed_work_sync(&conn->info_timer);
1736 
1737 	hcon->l2cap_data = NULL;
1738 	conn->hchan = NULL;
1739 	l2cap_conn_put(conn);
1740 }
1741 
1742 static void l2cap_conn_free(struct kref *ref)
1743 {
1744 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1745 
1746 	hci_conn_put(conn->hcon);
1747 	kfree(conn);
1748 }
1749 
1750 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1751 {
1752 	kref_get(&conn->ref);
1753 	return conn;
1754 }
1755 EXPORT_SYMBOL(l2cap_conn_get);
1756 
1757 void l2cap_conn_put(struct l2cap_conn *conn)
1758 {
1759 	kref_put(&conn->ref, l2cap_conn_free);
1760 }
1761 EXPORT_SYMBOL(l2cap_conn_put);
1762 
1763 /* ---- Socket interface ---- */
1764 
1765 /* Find socket with psm and source / destination bdaddr.
1766  * Returns closest match.
1767  */
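/* An exact (src, dst) match is returned immediately; otherwise the last
 * wildcard (BDADDR_ANY) match found while walking the global channel list
 * is returned. Either way the returned channel has had its reference count
 * raised with l2cap_chan_hold().
 */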
1768 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1769 						   bdaddr_t *src,
1770 						   bdaddr_t *dst,
1771 						   u8 link_type)
1772 {
1773 	struct l2cap_chan *c, *c1 = NULL;
1774 
1775 	read_lock(&chan_list_lock);
1776 
1777 	list_for_each_entry(c, &chan_list, global_l) {
1778 		if (state && c->state != state)
1779 			continue;
1780 
1781 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1782 			continue;
1783 
1784 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1785 			continue;
1786 
1787 		if (c->psm == psm) {
1788 			int src_match, dst_match;
1789 			int src_any, dst_any;
1790 
1791 			/* Exact match. */
1792 			src_match = !bacmp(&c->src, src);
1793 			dst_match = !bacmp(&c->dst, dst);
1794 			if (src_match && dst_match) {
1795 				l2cap_chan_hold(c);
1796 				read_unlock(&chan_list_lock);
1797 				return c;
1798 			}
1799 
1800 			/* Closest match */
1801 			src_any = !bacmp(&c->src, BDADDR_ANY);
1802 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1803 			if ((src_match && dst_any) || (src_any && dst_match) ||
1804 			    (src_any && dst_any))
1805 				c1 = c;
1806 		}
1807 	}
1808 
1809 	if (c1)
1810 		l2cap_chan_hold(c1);
1811 
1812 	read_unlock(&chan_list_lock);
1813 
1814 	return c1;
1815 }
1816 
1817 static void l2cap_monitor_timeout(struct work_struct *work)
1818 {
1819 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1820 					       monitor_timer.work);
1821 
1822 	BT_DBG("chan %p", chan);
1823 
1824 	l2cap_chan_lock(chan);
1825 
1826 	if (!chan->conn) {
1827 		l2cap_chan_unlock(chan);
1828 		l2cap_chan_put(chan);
1829 		return;
1830 	}
1831 
1832 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1833 
1834 	l2cap_chan_unlock(chan);
1835 	l2cap_chan_put(chan);
1836 }
1837 
1838 static void l2cap_retrans_timeout(struct work_struct *work)
1839 {
1840 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1841 					       retrans_timer.work);
1842 
1843 	BT_DBG("chan %p", chan);
1844 
1845 	l2cap_chan_lock(chan);
1846 
1847 	if (!chan->conn) {
1848 		l2cap_chan_unlock(chan);
1849 		l2cap_chan_put(chan);
1850 		return;
1851 	}
1852 
1853 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1854 	l2cap_chan_unlock(chan);
1855 	l2cap_chan_put(chan);
1856 }
1857 
1858 static void l2cap_streaming_send(struct l2cap_chan *chan,
1859 				 struct sk_buff_head *skbs)
1860 {
1861 	struct sk_buff *skb;
1862 	struct l2cap_ctrl *control;
1863 
1864 	BT_DBG("chan %p, skbs %p", chan, skbs);
1865 
1866 	if (__chan_is_moving(chan))
1867 		return;
1868 
1869 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1870 
1871 	while (!skb_queue_empty(&chan->tx_q)) {
1872 
1873 		skb = skb_dequeue(&chan->tx_q);
1874 
1875 		bt_cb(skb)->l2cap.retries = 1;
1876 		control = &bt_cb(skb)->l2cap;
1877 
1878 		control->reqseq = 0;
1879 		control->txseq = chan->next_tx_seq;
1880 
1881 		__pack_control(chan, control, skb);
1882 
1883 		if (chan->fcs == L2CAP_FCS_CRC16) {
1884 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1885 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1886 		}
1887 
1888 		l2cap_do_send(chan, skb);
1889 
1890 		BT_DBG("Sent txseq %u", control->txseq);
1891 
1892 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1893 		chan->frames_sent++;
1894 	}
1895 }
1896 
1897 static int l2cap_ertm_send(struct l2cap_chan *chan)
1898 {
1899 	struct sk_buff *skb, *tx_skb;
1900 	struct l2cap_ctrl *control;
1901 	int sent = 0;
1902 
1903 	BT_DBG("chan %p", chan);
1904 
1905 	if (chan->state != BT_CONNECTED)
1906 		return -ENOTCONN;
1907 
1908 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1909 		return 0;
1910 
1911 	if (__chan_is_moving(chan))
1912 		return 0;
1913 
1914 	while (chan->tx_send_head &&
1915 	       chan->unacked_frames < chan->remote_tx_win &&
1916 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1917 
1918 		skb = chan->tx_send_head;
1919 
1920 		bt_cb(skb)->l2cap.retries = 1;
1921 		control = &bt_cb(skb)->l2cap;
1922 
1923 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1924 			control->final = 1;
1925 
1926 		control->reqseq = chan->buffer_seq;
1927 		chan->last_acked_seq = chan->buffer_seq;
1928 		control->txseq = chan->next_tx_seq;
1929 
1930 		__pack_control(chan, control, skb);
1931 
1932 		if (chan->fcs == L2CAP_FCS_CRC16) {
1933 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1934 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1935 		}
1936 
1937 		/* Clone after data has been modified. Data is assumed to be
1938 		 * read-only (for locking purposes) on cloned sk_buffs.
1939 		 */
1940 		tx_skb = skb_clone(skb, GFP_KERNEL);
1941 
1942 		if (!tx_skb)
1943 			break;
1944 
1945 		__set_retrans_timer(chan);
1946 
1947 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1948 		chan->unacked_frames++;
1949 		chan->frames_sent++;
1950 		sent++;
1951 
1952 		if (skb_queue_is_last(&chan->tx_q, skb))
1953 			chan->tx_send_head = NULL;
1954 		else
1955 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1956 
1957 		l2cap_do_send(chan, tx_skb);
1958 		BT_DBG("Sent txseq %u", control->txseq);
1959 	}
1960 
1961 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1962 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1963 
1964 	return sent;
1965 }
1966 
1967 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1968 {
1969 	struct l2cap_ctrl control;
1970 	struct sk_buff *skb;
1971 	struct sk_buff *tx_skb;
1972 	u16 seq;
1973 
1974 	BT_DBG("chan %p", chan);
1975 
1976 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1977 		return;
1978 
1979 	if (__chan_is_moving(chan))
1980 		return;
1981 
1982 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1983 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1984 
1985 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1986 		if (!skb) {
1987 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1988 			       seq);
1989 			continue;
1990 		}
1991 
1992 		bt_cb(skb)->l2cap.retries++;
1993 		control = bt_cb(skb)->l2cap;
1994 
1995 		if (chan->max_tx != 0 &&
1996 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
1997 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1998 			l2cap_send_disconn_req(chan, ECONNRESET);
1999 			l2cap_seq_list_clear(&chan->retrans_list);
2000 			break;
2001 		}
2002 
2003 		control.reqseq = chan->buffer_seq;
2004 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2005 			control.final = 1;
2006 		else
2007 			control.final = 0;
2008 
2009 		if (skb_cloned(skb)) {
2010 			/* Cloned sk_buffs are read-only, so we need a
2011 			 * writeable copy
2012 			 */
2013 			tx_skb = skb_copy(skb, GFP_KERNEL);
2014 		} else {
2015 			tx_skb = skb_clone(skb, GFP_KERNEL);
2016 		}
2017 
2018 		if (!tx_skb) {
2019 			l2cap_seq_list_clear(&chan->retrans_list);
2020 			break;
2021 		}
2022 
2023 		/* Update skb contents */
2024 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2025 			put_unaligned_le32(__pack_extended_control(&control),
2026 					   tx_skb->data + L2CAP_HDR_SIZE);
2027 		} else {
2028 			put_unaligned_le16(__pack_enhanced_control(&control),
2029 					   tx_skb->data + L2CAP_HDR_SIZE);
2030 		}
2031 
2032 		/* Update FCS */
2033 		if (chan->fcs == L2CAP_FCS_CRC16) {
2034 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2035 					tx_skb->len - L2CAP_FCS_SIZE);
2036 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2037 						L2CAP_FCS_SIZE);
2038 		}
2039 
2040 		l2cap_do_send(chan, tx_skb);
2041 
2042 		BT_DBG("Resent txseq %d", control.txseq);
2043 
2044 		chan->last_acked_seq = chan->buffer_seq;
2045 	}
2046 }
2047 
2048 static void l2cap_retransmit(struct l2cap_chan *chan,
2049 			     struct l2cap_ctrl *control)
2050 {
2051 	BT_DBG("chan %p, control %p", chan, control);
2052 
2053 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2054 	l2cap_ertm_resend(chan);
2055 }
2056 
2057 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2058 				 struct l2cap_ctrl *control)
2059 {
2060 	struct sk_buff *skb;
2061 
2062 	BT_DBG("chan %p, control %p", chan, control);
2063 
2064 	if (control->poll)
2065 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2066 
2067 	l2cap_seq_list_clear(&chan->retrans_list);
2068 
2069 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2070 		return;
2071 
2072 	if (chan->unacked_frames) {
2073 		skb_queue_walk(&chan->tx_q, skb) {
2074 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2075 			    skb == chan->tx_send_head)
2076 				break;
2077 		}
2078 
2079 		skb_queue_walk_from(&chan->tx_q, skb) {
2080 			if (skb == chan->tx_send_head)
2081 				break;
2082 
2083 			l2cap_seq_list_append(&chan->retrans_list,
2084 					      bt_cb(skb)->l2cap.txseq);
2085 		}
2086 
2087 		l2cap_ertm_resend(chan);
2088 	}
2089 }
2090 
2091 static void l2cap_send_ack(struct l2cap_chan *chan)
2092 {
2093 	struct l2cap_ctrl control;
2094 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2095 					 chan->last_acked_seq);
2096 	int threshold;
2097 
2098 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2099 	       chan, chan->last_acked_seq, chan->buffer_seq);
2100 
2101 	memset(&control, 0, sizeof(control));
2102 	control.sframe = 1;
2103 
2104 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2105 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2106 		__clear_ack_timer(chan);
2107 		control.super = L2CAP_SUPER_RNR;
2108 		control.reqseq = chan->buffer_seq;
2109 		l2cap_send_sframe(chan, &control);
2110 	} else {
2111 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2112 			l2cap_ertm_send(chan);
2113 			/* If any i-frames were sent, they included an ack */
2114 			if (chan->buffer_seq == chan->last_acked_seq)
2115 				frames_to_ack = 0;
2116 		}
2117 
2118 		/* Ack now if the window is 3/4ths full.
2119 		 * Calculate without mul or div
2120 		 */
2121 		threshold = chan->ack_win;
2122 		threshold += threshold << 1;
2123 		threshold >>= 2;
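		/* i.e. threshold = (w + 2*w) >> 2 = 3*w/4; e.g. ack_win = 8 gives
		 * threshold = 24 >> 2 = 6, so an ack is forced once 6 frames are
		 * waiting to be acknowledged.
		 */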
2124 
2125 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2126 		       threshold);
2127 
2128 		if (frames_to_ack >= threshold) {
2129 			__clear_ack_timer(chan);
2130 			control.super = L2CAP_SUPER_RR;
2131 			control.reqseq = chan->buffer_seq;
2132 			l2cap_send_sframe(chan, &control);
2133 			frames_to_ack = 0;
2134 		}
2135 
2136 		if (frames_to_ack)
2137 			__set_ack_timer(chan);
2138 	}
2139 }
2140 
2141 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2142 					 struct msghdr *msg, int len,
2143 					 int count, struct sk_buff *skb)
2144 {
2145 	struct l2cap_conn *conn = chan->conn;
2146 	struct sk_buff **frag;
2147 	int sent = 0;
2148 
2149 	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2150 		return -EFAULT;
2151 
2152 	sent += count;
2153 	len  -= count;
2154 
2155 	/* Continuation fragments (no L2CAP header) */
2156 	frag = &skb_shinfo(skb)->frag_list;
2157 	while (len) {
2158 		struct sk_buff *tmp;
2159 
2160 		count = min_t(unsigned int, conn->mtu, len);
2161 
2162 		tmp = chan->ops->alloc_skb(chan, 0, count,
2163 					   msg->msg_flags & MSG_DONTWAIT);
2164 		if (IS_ERR(tmp))
2165 			return PTR_ERR(tmp);
2166 
2167 		*frag = tmp;
2168 
2169 		if (copy_from_iter(skb_put(*frag, count), count,
2170 				   &msg->msg_iter) != count)
2171 			return -EFAULT;
2172 
2173 		sent += count;
2174 		len  -= count;
2175 
2176 		skb->len += (*frag)->len;
2177 		skb->data_len += (*frag)->len;
2178 
2179 		frag = &(*frag)->next;
2180 	}
2181 
2182 	return sent;
2183 }
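/* The resulting skb is a chain: the caller-allocated head holds the first
 * "count" bytes of payload, each further conn->mtu sized chunk is linked in
 * via frag_list, and skb->len / skb->data_len on the head are updated to
 * cover the complete message.
 */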
2184 
2185 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2186 						 struct msghdr *msg, size_t len)
2187 {
2188 	struct l2cap_conn *conn = chan->conn;
2189 	struct sk_buff *skb;
2190 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2191 	struct l2cap_hdr *lh;
2192 
2193 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2194 	       __le16_to_cpu(chan->psm), len);
2195 
2196 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2197 
2198 	skb = chan->ops->alloc_skb(chan, hlen, count,
2199 				   msg->msg_flags & MSG_DONTWAIT);
2200 	if (IS_ERR(skb))
2201 		return skb;
2202 
2203 	/* Create L2CAP header */
2204 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 	lh->cid = cpu_to_le16(chan->dcid);
2206 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2207 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2208 
2209 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2210 	if (unlikely(err < 0)) {
2211 		kfree_skb(skb);
2212 		return ERR_PTR(err);
2213 	}
2214 	return skb;
2215 }
2216 
2217 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2218 					      struct msghdr *msg, size_t len)
2219 {
2220 	struct l2cap_conn *conn = chan->conn;
2221 	struct sk_buff *skb;
2222 	int err, count;
2223 	struct l2cap_hdr *lh;
2224 
2225 	BT_DBG("chan %p len %zu", chan, len);
2226 
2227 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2228 
2229 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2230 				   msg->msg_flags & MSG_DONTWAIT);
2231 	if (IS_ERR(skb))
2232 		return skb;
2233 
2234 	/* Create L2CAP header */
2235 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2236 	lh->cid = cpu_to_le16(chan->dcid);
2237 	lh->len = cpu_to_le16(len);
2238 
2239 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2240 	if (unlikely(err < 0)) {
2241 		kfree_skb(skb);
2242 		return ERR_PTR(err);
2243 	}
2244 	return skb;
2245 }
2246 
2247 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2248 					       struct msghdr *msg, size_t len,
2249 					       u16 sdulen)
2250 {
2251 	struct l2cap_conn *conn = chan->conn;
2252 	struct sk_buff *skb;
2253 	int err, count, hlen;
2254 	struct l2cap_hdr *lh;
2255 
2256 	BT_DBG("chan %p len %zu", chan, len);
2257 
2258 	if (!conn)
2259 		return ERR_PTR(-ENOTCONN);
2260 
2261 	hlen = __ertm_hdr_size(chan);
2262 
2263 	if (sdulen)
2264 		hlen += L2CAP_SDULEN_SIZE;
2265 
2266 	if (chan->fcs == L2CAP_FCS_CRC16)
2267 		hlen += L2CAP_FCS_SIZE;
2268 
2269 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2270 
2271 	skb = chan->ops->alloc_skb(chan, hlen, count,
2272 				   msg->msg_flags & MSG_DONTWAIT);
2273 	if (IS_ERR(skb))
2274 		return skb;
2275 
2276 	/* Create L2CAP header */
2277 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2278 	lh->cid = cpu_to_le16(chan->dcid);
2279 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2280 
2281 	/* Control header is populated later */
2282 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2283 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2284 	else
2285 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2286 
2287 	if (sdulen)
2288 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2289 
2290 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2291 	if (unlikely(err < 0)) {
2292 		kfree_skb(skb);
2293 		return ERR_PTR(err);
2294 	}
2295 
2296 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2297 	bt_cb(skb)->l2cap.retries = 0;
2298 	return skb;
2299 }
2300 
2301 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2302 			     struct sk_buff_head *seg_queue,
2303 			     struct msghdr *msg, size_t len)
2304 {
2305 	struct sk_buff *skb;
2306 	u16 sdu_len;
2307 	size_t pdu_len;
2308 	u8 sar;
2309 
2310 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2311 
2312 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2313 	 * so fragmented skbs are not used.  The HCI layer's handling
2314 	 * of fragmented skbs is not compatible with ERTM's queueing.
2315 	 */
2316 
2317 	/* PDU size is derived from the HCI MTU */
2318 	pdu_len = chan->conn->mtu;
2319 
2320 	/* Constrain PDU size for BR/EDR connections */
2321 	if (!chan->hs_hcon)
2322 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2323 
2324 	/* Adjust for largest possible L2CAP overhead. */
2325 	if (chan->fcs)
2326 		pdu_len -= L2CAP_FCS_SIZE;
2327 
2328 	pdu_len -= __ertm_hdr_size(chan);
2329 
2330 	/* Remote device may have requested smaller PDUs */
2331 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2332 
2333 	if (len <= pdu_len) {
2334 		sar = L2CAP_SAR_UNSEGMENTED;
2335 		sdu_len = 0;
2336 		pdu_len = len;
2337 	} else {
2338 		sar = L2CAP_SAR_START;
2339 		sdu_len = len;
2340 	}
2341 
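	/* Example: a 1000-byte SDU with a 300-byte PDU limit becomes a
	 * SAR_START PDU (300 bytes, SDU length 1000), two SAR_CONTINUE PDUs
	 * of 300 bytes each and a final SAR_END PDU of 100 bytes.
	 */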
2342 	while (len > 0) {
2343 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2344 
2345 		if (IS_ERR(skb)) {
2346 			__skb_queue_purge(seg_queue);
2347 			return PTR_ERR(skb);
2348 		}
2349 
2350 		bt_cb(skb)->l2cap.sar = sar;
2351 		__skb_queue_tail(seg_queue, skb);
2352 
2353 		len -= pdu_len;
2354 		if (sdu_len)
2355 			sdu_len = 0;
2356 
2357 		if (len <= pdu_len) {
2358 			sar = L2CAP_SAR_END;
2359 			pdu_len = len;
2360 		} else {
2361 			sar = L2CAP_SAR_CONTINUE;
2362 		}
2363 	}
2364 
2365 	return 0;
2366 }
2367 
2368 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2369 						   struct msghdr *msg,
2370 						   size_t len, u16 sdulen)
2371 {
2372 	struct l2cap_conn *conn = chan->conn;
2373 	struct sk_buff *skb;
2374 	int err, count, hlen;
2375 	struct l2cap_hdr *lh;
2376 
2377 	BT_DBG("chan %p len %zu", chan, len);
2378 
2379 	if (!conn)
2380 		return ERR_PTR(-ENOTCONN);
2381 
2382 	hlen = L2CAP_HDR_SIZE;
2383 
2384 	if (sdulen)
2385 		hlen += L2CAP_SDULEN_SIZE;
2386 
2387 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2388 
2389 	skb = chan->ops->alloc_skb(chan, hlen, count,
2390 				   msg->msg_flags & MSG_DONTWAIT);
2391 	if (IS_ERR(skb))
2392 		return skb;
2393 
2394 	/* Create L2CAP header */
2395 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2396 	lh->cid = cpu_to_le16(chan->dcid);
2397 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2398 
2399 	if (sdulen)
2400 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2401 
2402 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2403 	if (unlikely(err < 0)) {
2404 		kfree_skb(skb);
2405 		return ERR_PTR(err);
2406 	}
2407 
2408 	return skb;
2409 }
2410 
2411 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2412 				struct sk_buff_head *seg_queue,
2413 				struct msghdr *msg, size_t len)
2414 {
2415 	struct sk_buff *skb;
2416 	size_t pdu_len;
2417 	u16 sdu_len;
2418 
2419 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2420 
2421 	sdu_len = len;
2422 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2423 
2424 	while (len > 0) {
2425 		if (len <= pdu_len)
2426 			pdu_len = len;
2427 
2428 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2429 		if (IS_ERR(skb)) {
2430 			__skb_queue_purge(seg_queue);
2431 			return PTR_ERR(skb);
2432 		}
2433 
2434 		__skb_queue_tail(seg_queue, skb);
2435 
2436 		len -= pdu_len;
2437 
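		/* Only the first PDU carries the 2-byte SDU length field, so after
		 * it has been queued the remaining PDUs may carry
		 * L2CAP_SDULEN_SIZE more payload bytes.
		 */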
2438 		if (sdu_len) {
2439 			sdu_len = 0;
2440 			pdu_len += L2CAP_SDULEN_SIZE;
2441 		}
2442 	}
2443 
2444 	return 0;
2445 }
2446 
2447 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2448 {
2449 	struct sk_buff *skb;
2450 	int err;
2451 	struct sk_buff_head seg_queue;
2452 
2453 	if (!chan->conn)
2454 		return -ENOTCONN;
2455 
2456 	/* Connectionless channel */
2457 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2458 		skb = l2cap_create_connless_pdu(chan, msg, len);
2459 		if (IS_ERR(skb))
2460 			return PTR_ERR(skb);
2461 
2462 		/* The channel lock is released before requesting a new skb and then
2463 		 * reacquired, so we need to recheck the channel state.
2464 		 */
2465 		if (chan->state != BT_CONNECTED) {
2466 			kfree_skb(skb);
2467 			return -ENOTCONN;
2468 		}
2469 
2470 		l2cap_do_send(chan, skb);
2471 		return len;
2472 	}
2473 
2474 	switch (chan->mode) {
2475 	case L2CAP_MODE_LE_FLOWCTL:
2476 		/* Check outgoing MTU */
2477 		if (len > chan->omtu)
2478 			return -EMSGSIZE;
2479 
2480 		if (!chan->tx_credits)
2481 			return -EAGAIN;
2482 
2483 		__skb_queue_head_init(&seg_queue);
2484 
2485 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2486 
2487 		if (chan->state != BT_CONNECTED) {
2488 			__skb_queue_purge(&seg_queue);
2489 			err = -ENOTCONN;
2490 		}
2491 
2492 		if (err)
2493 			return err;
2494 
2495 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2496 
2497 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2498 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2499 			chan->tx_credits--;
2500 		}
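		/* Each LE PDU consumes one credit; whatever is left on tx_q stays
		 * queued until the remote grants more credits, and the channel is
		 * suspended below once the credit pool runs dry.
		 */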
2501 
2502 		if (!chan->tx_credits)
2503 			chan->ops->suspend(chan);
2504 
2505 		err = len;
2506 
2507 		break;
2508 
2509 	case L2CAP_MODE_BASIC:
2510 		/* Check outgoing MTU */
2511 		if (len > chan->omtu)
2512 			return -EMSGSIZE;
2513 
2514 		/* Create a basic PDU */
2515 		skb = l2cap_create_basic_pdu(chan, msg, len);
2516 		if (IS_ERR(skb))
2517 			return PTR_ERR(skb);
2518 
2519 		/* The channel lock is released before requesting a new skb and then
2520 		 * reacquired, so we need to recheck the channel state.
2521 		 */
2522 		if (chan->state != BT_CONNECTED) {
2523 			kfree_skb(skb);
2524 			return -ENOTCONN;
2525 		}
2526 
2527 		l2cap_do_send(chan, skb);
2528 		err = len;
2529 		break;
2530 
2531 	case L2CAP_MODE_ERTM:
2532 	case L2CAP_MODE_STREAMING:
2533 		/* Check outgoing MTU */
2534 		if (len > chan->omtu) {
2535 			err = -EMSGSIZE;
2536 			break;
2537 		}
2538 
2539 		__skb_queue_head_init(&seg_queue);
2540 
2541 		/* Do segmentation before calling in to the state machine,
2542 		 * since it's possible to block while waiting for memory
2543 		 * allocation.
2544 		 */
2545 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2546 
2547 		/* The channel could have been closed while segmenting,
2548 		 * check that it is still connected.
2549 		 */
2550 		if (chan->state != BT_CONNECTED) {
2551 			__skb_queue_purge(&seg_queue);
2552 			err = -ENOTCONN;
2553 		}
2554 
2555 		if (err)
2556 			break;
2557 
2558 		if (chan->mode == L2CAP_MODE_ERTM)
2559 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2560 		else
2561 			l2cap_streaming_send(chan, &seg_queue);
2562 
2563 		err = len;
2564 
2565 		/* If the skbs were not queued for sending, they'll still be in
2566 		 * seg_queue and need to be purged.
2567 		 */
2568 		__skb_queue_purge(&seg_queue);
2569 		break;
2570 
2571 	default:
2572 		BT_DBG("bad state %1.1x", chan->mode);
2573 		err = -EBADFD;
2574 	}
2575 
2576 	return err;
2577 }
2578 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2579 
2580 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2581 {
2582 	struct l2cap_ctrl control;
2583 	u16 seq;
2584 
2585 	BT_DBG("chan %p, txseq %u", chan, txseq);
2586 
2587 	memset(&control, 0, sizeof(control));
2588 	control.sframe = 1;
2589 	control.super = L2CAP_SUPER_SREJ;
2590 
2591 	for (seq = chan->expected_tx_seq; seq != txseq;
2592 	     seq = __next_seq(chan, seq)) {
2593 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2594 			control.reqseq = seq;
2595 			l2cap_send_sframe(chan, &control);
2596 			l2cap_seq_list_append(&chan->srej_list, seq);
2597 		}
2598 	}
2599 
2600 	chan->expected_tx_seq = __next_seq(chan, txseq);
2601 }
2602 
2603 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2604 {
2605 	struct l2cap_ctrl control;
2606 
2607 	BT_DBG("chan %p", chan);
2608 
2609 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2610 		return;
2611 
2612 	memset(&control, 0, sizeof(control));
2613 	control.sframe = 1;
2614 	control.super = L2CAP_SUPER_SREJ;
2615 	control.reqseq = chan->srej_list.tail;
2616 	l2cap_send_sframe(chan, &control);
2617 }
2618 
2619 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2620 {
2621 	struct l2cap_ctrl control;
2622 	u16 initial_head;
2623 	u16 seq;
2624 
2625 	BT_DBG("chan %p, txseq %u", chan, txseq);
2626 
2627 	memset(&control, 0, sizeof(control));
2628 	control.sframe = 1;
2629 	control.super = L2CAP_SUPER_SREJ;
2630 
2631 	/* Capture initial list head to allow only one pass through the list. */
2632 	initial_head = chan->srej_list.head;
2633 
2634 	do {
2635 		seq = l2cap_seq_list_pop(&chan->srej_list);
2636 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2637 			break;
2638 
2639 		control.reqseq = seq;
2640 		l2cap_send_sframe(chan, &control);
2641 		l2cap_seq_list_append(&chan->srej_list, seq);
2642 	} while (chan->srej_list.head != initial_head);
2643 }
2644 
2645 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2646 {
2647 	struct sk_buff *acked_skb;
2648 	u16 ackseq;
2649 
2650 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2651 
2652 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2653 		return;
2654 
2655 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2656 	       chan->expected_ack_seq, chan->unacked_frames);
2657 
2658 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2659 	     ackseq = __next_seq(chan, ackseq)) {
2660 
2661 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2662 		if (acked_skb) {
2663 			skb_unlink(acked_skb, &chan->tx_q);
2664 			kfree_skb(acked_skb);
2665 			chan->unacked_frames--;
2666 		}
2667 	}
2668 
2669 	chan->expected_ack_seq = reqseq;
2670 
2671 	if (chan->unacked_frames == 0)
2672 		__clear_retrans_timer(chan);
2673 
2674 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2675 }
2676 
2677 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2678 {
2679 	BT_DBG("chan %p", chan);
2680 
2681 	chan->expected_tx_seq = chan->buffer_seq;
2682 	l2cap_seq_list_clear(&chan->srej_list);
2683 	skb_queue_purge(&chan->srej_q);
2684 	chan->rx_state = L2CAP_RX_STATE_RECV;
2685 }
2686 
2687 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2688 				struct l2cap_ctrl *control,
2689 				struct sk_buff_head *skbs, u8 event)
2690 {
2691 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2692 	       event);
2693 
2694 	switch (event) {
2695 	case L2CAP_EV_DATA_REQUEST:
2696 		if (chan->tx_send_head == NULL)
2697 			chan->tx_send_head = skb_peek(skbs);
2698 
2699 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2700 		l2cap_ertm_send(chan);
2701 		break;
2702 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2703 		BT_DBG("Enter LOCAL_BUSY");
2704 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2705 
2706 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2707 			/* The SREJ_SENT state must be aborted if we are to
2708 			 * enter the LOCAL_BUSY state.
2709 			 */
2710 			l2cap_abort_rx_srej_sent(chan);
2711 		}
2712 
2713 		l2cap_send_ack(chan);
2714 
2715 		break;
2716 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2717 		BT_DBG("Exit LOCAL_BUSY");
2718 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2719 
2720 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2721 			struct l2cap_ctrl local_control;
2722 
2723 			memset(&local_control, 0, sizeof(local_control));
2724 			local_control.sframe = 1;
2725 			local_control.super = L2CAP_SUPER_RR;
2726 			local_control.poll = 1;
2727 			local_control.reqseq = chan->buffer_seq;
2728 			l2cap_send_sframe(chan, &local_control);
2729 
2730 			chan->retry_count = 1;
2731 			__set_monitor_timer(chan);
2732 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2733 		}
2734 		break;
2735 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2736 		l2cap_process_reqseq(chan, control->reqseq);
2737 		break;
2738 	case L2CAP_EV_EXPLICIT_POLL:
2739 		l2cap_send_rr_or_rnr(chan, 1);
2740 		chan->retry_count = 1;
2741 		__set_monitor_timer(chan);
2742 		__clear_ack_timer(chan);
2743 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2744 		break;
2745 	case L2CAP_EV_RETRANS_TO:
2746 		l2cap_send_rr_or_rnr(chan, 1);
2747 		chan->retry_count = 1;
2748 		__set_monitor_timer(chan);
2749 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 		break;
2751 	case L2CAP_EV_RECV_FBIT:
2752 		/* Nothing to process */
2753 		break;
2754 	default:
2755 		break;
2756 	}
2757 }
2758 
2759 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2760 				  struct l2cap_ctrl *control,
2761 				  struct sk_buff_head *skbs, u8 event)
2762 {
2763 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2764 	       event);
2765 
2766 	switch (event) {
2767 	case L2CAP_EV_DATA_REQUEST:
2768 		if (chan->tx_send_head == NULL)
2769 			chan->tx_send_head = skb_peek(skbs);
2770 		/* Queue data, but don't send. */
2771 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2772 		break;
2773 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2774 		BT_DBG("Enter LOCAL_BUSY");
2775 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2776 
2777 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2778 			/* The SREJ_SENT state must be aborted if we are to
2779 			 * enter the LOCAL_BUSY state.
2780 			 */
2781 			l2cap_abort_rx_srej_sent(chan);
2782 		}
2783 
2784 		l2cap_send_ack(chan);
2785 
2786 		break;
2787 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2788 		BT_DBG("Exit LOCAL_BUSY");
2789 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2790 
2791 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2792 			struct l2cap_ctrl local_control;
2793 			memset(&local_control, 0, sizeof(local_control));
2794 			local_control.sframe = 1;
2795 			local_control.super = L2CAP_SUPER_RR;
2796 			local_control.poll = 1;
2797 			local_control.reqseq = chan->buffer_seq;
2798 			l2cap_send_sframe(chan, &local_control);
2799 
2800 			chan->retry_count = 1;
2801 			__set_monitor_timer(chan);
2802 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2803 		}
2804 		break;
2805 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2806 		l2cap_process_reqseq(chan, control->reqseq);
2807 
2808 		/* Fall through */
2809 
2810 	case L2CAP_EV_RECV_FBIT:
2811 		if (control && control->final) {
2812 			__clear_monitor_timer(chan);
2813 			if (chan->unacked_frames > 0)
2814 				__set_retrans_timer(chan);
2815 			chan->retry_count = 0;
2816 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2817 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2818 		}
2819 		break;
2820 	case L2CAP_EV_EXPLICIT_POLL:
2821 		/* Ignore */
2822 		break;
2823 	case L2CAP_EV_MONITOR_TO:
2824 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2825 			l2cap_send_rr_or_rnr(chan, 1);
2826 			__set_monitor_timer(chan);
2827 			chan->retry_count++;
2828 		} else {
2829 			l2cap_send_disconn_req(chan, ECONNABORTED);
2830 		}
2831 		break;
2832 	default:
2833 		break;
2834 	}
2835 }
2836 
2837 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2838 		     struct sk_buff_head *skbs, u8 event)
2839 {
2840 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2841 	       chan, control, skbs, event, chan->tx_state);
2842 
2843 	switch (chan->tx_state) {
2844 	case L2CAP_TX_STATE_XMIT:
2845 		l2cap_tx_state_xmit(chan, control, skbs, event);
2846 		break;
2847 	case L2CAP_TX_STATE_WAIT_F:
2848 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2849 		break;
2850 	default:
2851 		/* Ignore event */
2852 		break;
2853 	}
2854 }
2855 
2856 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2857 			     struct l2cap_ctrl *control)
2858 {
2859 	BT_DBG("chan %p, control %p", chan, control);
2860 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2861 }
2862 
2863 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2864 				  struct l2cap_ctrl *control)
2865 {
2866 	BT_DBG("chan %p, control %p", chan, control);
2867 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2868 }
2869 
2870 /* Copy frame to all raw sockets on that connection */
2871 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2872 {
2873 	struct sk_buff *nskb;
2874 	struct l2cap_chan *chan;
2875 
2876 	BT_DBG("conn %p", conn);
2877 
2878 	mutex_lock(&conn->chan_lock);
2879 
2880 	list_for_each_entry(chan, &conn->chan_l, list) {
2881 		if (chan->chan_type != L2CAP_CHAN_RAW)
2882 			continue;
2883 
2884 		/* Don't send frame to the channel it came from */
2885 		if (bt_cb(skb)->l2cap.chan == chan)
2886 			continue;
2887 
2888 		nskb = skb_clone(skb, GFP_KERNEL);
2889 		if (!nskb)
2890 			continue;
2891 		if (chan->ops->recv(chan, nskb))
2892 			kfree_skb(nskb);
2893 	}
2894 
2895 	mutex_unlock(&conn->chan_lock);
2896 }
2897 
2898 /* ---- L2CAP signalling commands ---- */
2899 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2900 				       u8 ident, u16 dlen, void *data)
2901 {
2902 	struct sk_buff *skb, **frag;
2903 	struct l2cap_cmd_hdr *cmd;
2904 	struct l2cap_hdr *lh;
2905 	int len, count;
2906 
2907 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2908 	       conn, code, ident, dlen);
2909 
2910 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2911 		return NULL;
2912 
2913 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2914 	count = min_t(unsigned int, conn->mtu, len);
2915 
2916 	skb = bt_skb_alloc(count, GFP_KERNEL);
2917 	if (!skb)
2918 		return NULL;
2919 
2920 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2921 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2922 
2923 	if (conn->hcon->type == LE_LINK)
2924 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2925 	else
2926 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2927 
2928 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2929 	cmd->code  = code;
2930 	cmd->ident = ident;
2931 	cmd->len   = cpu_to_le16(dlen);
2932 
2933 	if (dlen) {
2934 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2935 		memcpy(skb_put(skb, count), data, count);
2936 		data += count;
2937 	}
2938 
2939 	len -= skb->len;
2940 
2941 	/* Continuation fragments (no L2CAP header) */
2942 	frag = &skb_shinfo(skb)->frag_list;
2943 	while (len) {
2944 		count = min_t(unsigned int, conn->mtu, len);
2945 
2946 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2947 		if (!*frag)
2948 			goto fail;
2949 
2950 		memcpy(skb_put(*frag, count), data, count);
2951 
2952 		len  -= count;
2953 		data += count;
2954 
2955 		frag = &(*frag)->next;
2956 	}
2957 
2958 	return skb;
2959 
2960 fail:
2961 	kfree_skb(skb);
2962 	return NULL;
2963 }
2964 
2965 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2966 				     unsigned long *val)
2967 {
2968 	struct l2cap_conf_opt *opt = *ptr;
2969 	int len;
2970 
2971 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2972 	*ptr += len;
2973 
2974 	*type = opt->type;
2975 	*olen = opt->len;
2976 
2977 	switch (opt->len) {
2978 	case 1:
2979 		*val = *((u8 *) opt->val);
2980 		break;
2981 
2982 	case 2:
2983 		*val = get_unaligned_le16(opt->val);
2984 		break;
2985 
2986 	case 4:
2987 		*val = get_unaligned_le32(opt->val);
2988 		break;
2989 
2990 	default:
2991 		*val = (unsigned long) opt->val;
2992 		break;
2993 	}
2994 
2995 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2996 	return len;
2997 }
2998 
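/* Configuration options are packed as simple TLVs: a one-byte type, a
 * one-byte length and a value of 1, 2, 4 or "len" bytes.  An MTU option,
 * for instance, goes out as [L2CAP_CONF_MTU][len = 2][MTU as le16].
 */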
2999 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3000 {
3001 	struct l2cap_conf_opt *opt = *ptr;
3002 
3003 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3004 
3005 	if (size < L2CAP_CONF_OPT_SIZE + len)
3006 		return;
3007 
3008 	opt->type = type;
3009 	opt->len  = len;
3010 
3011 	switch (len) {
3012 	case 1:
3013 		*((u8 *) opt->val)  = val;
3014 		break;
3015 
3016 	case 2:
3017 		put_unaligned_le16(val, opt->val);
3018 		break;
3019 
3020 	case 4:
3021 		put_unaligned_le32(val, opt->val);
3022 		break;
3023 
3024 	default:
3025 		memcpy(opt->val, (void *) val, len);
3026 		break;
3027 	}
3028 
3029 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3030 }
3031 
3032 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3033 {
3034 	struct l2cap_conf_efs efs;
3035 
3036 	switch (chan->mode) {
3037 	case L2CAP_MODE_ERTM:
3038 		efs.id		= chan->local_id;
3039 		efs.stype	= chan->local_stype;
3040 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3041 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3042 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3043 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3044 		break;
3045 
3046 	case L2CAP_MODE_STREAMING:
3047 		efs.id		= 1;
3048 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3049 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3050 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3051 		efs.acc_lat	= 0;
3052 		efs.flush_to	= 0;
3053 		break;
3054 
3055 	default:
3056 		return;
3057 	}
3058 
3059 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3060 			   (unsigned long) &efs, size);
3061 }
3062 
3063 static void l2cap_ack_timeout(struct work_struct *work)
3064 {
3065 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3066 					       ack_timer.work);
3067 	u16 frames_to_ack;
3068 
3069 	BT_DBG("chan %p", chan);
3070 
3071 	l2cap_chan_lock(chan);
3072 
3073 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3074 				     chan->last_acked_seq);
3075 
3076 	if (frames_to_ack)
3077 		l2cap_send_rr_or_rnr(chan, 0);
3078 
3079 	l2cap_chan_unlock(chan);
3080 	l2cap_chan_put(chan);
3081 }
3082 
3083 int l2cap_ertm_init(struct l2cap_chan *chan)
3084 {
3085 	int err;
3086 
3087 	chan->next_tx_seq = 0;
3088 	chan->expected_tx_seq = 0;
3089 	chan->expected_ack_seq = 0;
3090 	chan->unacked_frames = 0;
3091 	chan->buffer_seq = 0;
3092 	chan->frames_sent = 0;
3093 	chan->last_acked_seq = 0;
3094 	chan->sdu = NULL;
3095 	chan->sdu_last_frag = NULL;
3096 	chan->sdu_len = 0;
3097 
3098 	skb_queue_head_init(&chan->tx_q);
3099 
3100 	chan->local_amp_id = AMP_ID_BREDR;
3101 	chan->move_id = AMP_ID_BREDR;
3102 	chan->move_state = L2CAP_MOVE_STABLE;
3103 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3104 
3105 	if (chan->mode != L2CAP_MODE_ERTM)
3106 		return 0;
3107 
3108 	chan->rx_state = L2CAP_RX_STATE_RECV;
3109 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3110 
3111 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3112 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3113 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3114 
3115 	skb_queue_head_init(&chan->srej_q);
3116 
3117 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3118 	if (err < 0)
3119 		return err;
3120 
3121 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3122 	if (err < 0)
3123 		l2cap_seq_list_free(&chan->srej_list);
3124 
3125 	return err;
3126 }
3127 
3128 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3129 {
3130 	switch (mode) {
3131 	case L2CAP_MODE_STREAMING:
3132 	case L2CAP_MODE_ERTM:
3133 		if (l2cap_mode_supported(mode, remote_feat_mask))
3134 			return mode;
3135 		/* fall through */
3136 	default:
3137 		return L2CAP_MODE_BASIC;
3138 	}
3139 }
3140 
3141 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3142 {
3143 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3144 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3145 }
3146 
3147 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3148 {
3149 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3150 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3151 }
3152 
3153 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3154 				      struct l2cap_conf_rfc *rfc)
3155 {
3156 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3157 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3158 
3159 		/* Class 1 devices must have ERTM timeouts
3160 		 * exceeding the Link Supervision Timeout.  The
3161 		 * default Link Supervision Timeout for AMP
3162 		 * controllers is 10 seconds.
3163 		 *
3164 		 * Class 1 devices use 0xffffffff for their
3165 		 * best-effort flush timeout, so the clamping logic
3166 		 * will result in a timeout that meets the above
3167 		 * requirement.  ERTM timeouts are 16-bit values, so
3168 		 * the maximum timeout is 65.535 seconds.
3169 		 */
3170 
3171 		/* Convert timeout to milliseconds and round */
3172 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3173 
3174 		/* This is the recommended formula for class 2 devices
3175 		 * that start ERTM timers when packets are sent to the
3176 		 * controller.
3177 		 */
3178 		ertm_to = 3 * ertm_to + 500;
3179 
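		/* For example, the Class 1 flush timeout of 0xffffffff us rounds up
		 * to 4294968 ms, and 3 * 4294968 + 500 is far above 0xffff, so the
		 * clamp below yields the 65.535 second maximum.
		 */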
3180 		if (ertm_to > 0xffff)
3181 			ertm_to = 0xffff;
3182 
3183 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3184 		rfc->monitor_timeout = rfc->retrans_timeout;
3185 	} else {
3186 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3187 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3188 	}
3189 }
3190 
3191 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3192 {
3193 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3194 	    __l2cap_ews_supported(chan->conn)) {
3195 		/* use extended control field */
3196 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3197 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3198 	} else {
3199 		chan->tx_win = min_t(u16, chan->tx_win,
3200 				     L2CAP_DEFAULT_TX_WINDOW);
3201 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3202 	}
3203 	chan->ack_win = chan->tx_win;
3204 }
3205 
3206 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3207 {
3208 	struct l2cap_conf_req *req = data;
3209 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3210 	void *ptr = req->data;
3211 	void *endptr = data + data_size;
3212 	u16 size;
3213 
3214 	BT_DBG("chan %p", chan);
3215 
3216 	if (chan->num_conf_req || chan->num_conf_rsp)
3217 		goto done;
3218 
3219 	switch (chan->mode) {
3220 	case L2CAP_MODE_STREAMING:
3221 	case L2CAP_MODE_ERTM:
3222 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3223 			break;
3224 
3225 		if (__l2cap_efs_supported(chan->conn))
3226 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3227 
3228 		/* fall through */
3229 	default:
3230 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3231 		break;
3232 	}
3233 
3234 done:
3235 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3236 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3237 
3238 	switch (chan->mode) {
3239 	case L2CAP_MODE_BASIC:
3240 		if (disable_ertm)
3241 			break;
3242 
3243 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3244 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3245 			break;
3246 
3247 		rfc.mode            = L2CAP_MODE_BASIC;
3248 		rfc.txwin_size      = 0;
3249 		rfc.max_transmit    = 0;
3250 		rfc.retrans_timeout = 0;
3251 		rfc.monitor_timeout = 0;
3252 		rfc.max_pdu_size    = 0;
3253 
3254 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3255 				   (unsigned long) &rfc, endptr - ptr);
3256 		break;
3257 
3258 	case L2CAP_MODE_ERTM:
3259 		rfc.mode            = L2CAP_MODE_ERTM;
3260 		rfc.max_transmit    = chan->max_tx;
3261 
3262 		__l2cap_set_ertm_timeouts(chan, &rfc);
3263 
3264 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3265 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3266 			     L2CAP_FCS_SIZE);
3267 		rfc.max_pdu_size = cpu_to_le16(size);
3268 
3269 		l2cap_txwin_setup(chan);
3270 
3271 		rfc.txwin_size = min_t(u16, chan->tx_win,
3272 				       L2CAP_DEFAULT_TX_WINDOW);
3273 
3274 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3275 				   (unsigned long) &rfc, endptr - ptr);
3276 
3277 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3278 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3279 
3280 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3281 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3282 					   chan->tx_win, endptr - ptr);
3283 
3284 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3285 			if (chan->fcs == L2CAP_FCS_NONE ||
3286 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3287 				chan->fcs = L2CAP_FCS_NONE;
3288 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3289 						   chan->fcs, endptr - ptr);
3290 			}
3291 		break;
3292 
3293 	case L2CAP_MODE_STREAMING:
3294 		l2cap_txwin_setup(chan);
3295 		rfc.mode            = L2CAP_MODE_STREAMING;
3296 		rfc.txwin_size      = 0;
3297 		rfc.max_transmit    = 0;
3298 		rfc.retrans_timeout = 0;
3299 		rfc.monitor_timeout = 0;
3300 
3301 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3302 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3303 			     L2CAP_FCS_SIZE);
3304 		rfc.max_pdu_size = cpu_to_le16(size);
3305 
3306 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3307 				   (unsigned long) &rfc, endptr - ptr);
3308 
3309 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3310 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3311 
3312 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3313 			if (chan->fcs == L2CAP_FCS_NONE ||
3314 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3315 				chan->fcs = L2CAP_FCS_NONE;
3316 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3317 						   chan->fcs, endptr - ptr);
3318 			}
3319 		break;
3320 	}
3321 
3322 	req->dcid  = cpu_to_le16(chan->dcid);
3323 	req->flags = cpu_to_le16(0);
3324 
3325 	return ptr - data;
3326 }
3327 
3328 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3329 {
3330 	struct l2cap_conf_rsp *rsp = data;
3331 	void *ptr = rsp->data;
3332 	void *endptr = data + data_size;
3333 	void *req = chan->conf_req;
3334 	int len = chan->conf_len;
3335 	int type, hint, olen;
3336 	unsigned long val;
3337 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3338 	struct l2cap_conf_efs efs;
3339 	u8 remote_efs = 0;
3340 	u16 mtu = L2CAP_DEFAULT_MTU;
3341 	u16 result = L2CAP_CONF_SUCCESS;
3342 	u16 size;
3343 
3344 	BT_DBG("chan %p", chan);
3345 
3346 	while (len >= L2CAP_CONF_OPT_SIZE) {
3347 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3348 		if (len < 0)
3349 			break;
3350 
3351 		hint  = type & L2CAP_CONF_HINT;
3352 		type &= L2CAP_CONF_MASK;
3353 
3354 		switch (type) {
3355 		case L2CAP_CONF_MTU:
3356 			if (olen != 2)
3357 				break;
3358 			mtu = val;
3359 			break;
3360 
3361 		case L2CAP_CONF_FLUSH_TO:
3362 			if (olen != 2)
3363 				break;
3364 			chan->flush_to = val;
3365 			break;
3366 
3367 		case L2CAP_CONF_QOS:
3368 			break;
3369 
3370 		case L2CAP_CONF_RFC:
3371 			if (olen != sizeof(rfc))
3372 				break;
3373 			memcpy(&rfc, (void *) val, olen);
3374 			break;
3375 
3376 		case L2CAP_CONF_FCS:
3377 			if (olen != 1)
3378 				break;
3379 			if (val == L2CAP_FCS_NONE)
3380 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3381 			break;
3382 
3383 		case L2CAP_CONF_EFS:
3384 			if (olen != sizeof(efs))
3385 				break;
3386 			remote_efs = 1;
3387 			memcpy(&efs, (void *) val, olen);
3388 			break;
3389 
3390 		case L2CAP_CONF_EWS:
3391 			if (olen != 2)
3392 				break;
3393 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3394 				return -ECONNREFUSED;
3395 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3396 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3397 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3398 			chan->remote_tx_win = val;
3399 			break;
3400 
3401 		default:
3402 			if (hint)
3403 				break;
3404 			result = L2CAP_CONF_UNKNOWN;
3405 			*((u8 *) ptr++) = type;
3406 			break;
3407 		}
3408 	}
3409 
3410 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3411 		goto done;
3412 
3413 	switch (chan->mode) {
3414 	case L2CAP_MODE_STREAMING:
3415 	case L2CAP_MODE_ERTM:
3416 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3417 			chan->mode = l2cap_select_mode(rfc.mode,
3418 						       chan->conn->feat_mask);
3419 			break;
3420 		}
3421 
3422 		if (remote_efs) {
3423 			if (__l2cap_efs_supported(chan->conn))
3424 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3425 			else
3426 				return -ECONNREFUSED;
3427 		}
3428 
3429 		if (chan->mode != rfc.mode)
3430 			return -ECONNREFUSED;
3431 
3432 		break;
3433 	}
3434 
3435 done:
3436 	if (chan->mode != rfc.mode) {
3437 		result = L2CAP_CONF_UNACCEPT;
3438 		rfc.mode = chan->mode;
3439 
3440 		if (chan->num_conf_rsp == 1)
3441 			return -ECONNREFUSED;
3442 
3443 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3444 				   (unsigned long) &rfc, endptr - ptr);
3445 	}
3446 
3447 	if (result == L2CAP_CONF_SUCCESS) {
3448 		/* Configure output options and let the other side know
3449 		 * which ones we don't like. */
3450 
3451 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3452 			result = L2CAP_CONF_UNACCEPT;
3453 		else {
3454 			chan->omtu = mtu;
3455 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3456 		}
3457 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3458 
3459 		if (remote_efs) {
3460 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3461 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3462 			    efs.stype != chan->local_stype) {
3463 
3464 				result = L2CAP_CONF_UNACCEPT;
3465 
3466 				if (chan->num_conf_req >= 1)
3467 					return -ECONNREFUSED;
3468 
3469 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3470 						   sizeof(efs),
3471 						   (unsigned long) &efs, endptr - ptr);
3472 			} else {
3473 				/* Send PENDING Conf Rsp */
3474 				result = L2CAP_CONF_PENDING;
3475 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3476 			}
3477 		}
3478 
3479 		switch (rfc.mode) {
3480 		case L2CAP_MODE_BASIC:
3481 			chan->fcs = L2CAP_FCS_NONE;
3482 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3483 			break;
3484 
3485 		case L2CAP_MODE_ERTM:
3486 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3487 				chan->remote_tx_win = rfc.txwin_size;
3488 			else
3489 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3490 
3491 			chan->remote_max_tx = rfc.max_transmit;
3492 
3493 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3494 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3495 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3496 			rfc.max_pdu_size = cpu_to_le16(size);
3497 			chan->remote_mps = size;
3498 
3499 			__l2cap_set_ertm_timeouts(chan, &rfc);
3500 
3501 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3502 
3503 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3504 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3505 
3506 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3507 				chan->remote_id = efs.id;
3508 				chan->remote_stype = efs.stype;
3509 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3510 				chan->remote_flush_to =
3511 					le32_to_cpu(efs.flush_to);
3512 				chan->remote_acc_lat =
3513 					le32_to_cpu(efs.acc_lat);
3514 				chan->remote_sdu_itime =
3515 					le32_to_cpu(efs.sdu_itime);
3516 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3517 						   sizeof(efs),
3518 						   (unsigned long) &efs, endptr - ptr);
3519 			}
3520 			break;
3521 
3522 		case L2CAP_MODE_STREAMING:
3523 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3524 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3525 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3526 			rfc.max_pdu_size = cpu_to_le16(size);
3527 			chan->remote_mps = size;
3528 
3529 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3530 
3531 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3532 					   (unsigned long) &rfc, endptr - ptr);
3533 
3534 			break;
3535 
3536 		default:
3537 			result = L2CAP_CONF_UNACCEPT;
3538 
3539 			memset(&rfc, 0, sizeof(rfc));
3540 			rfc.mode = chan->mode;
3541 		}
3542 
3543 		if (result == L2CAP_CONF_SUCCESS)
3544 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3545 	}
3546 	rsp->scid   = cpu_to_le16(chan->dcid);
3547 	rsp->result = cpu_to_le16(result);
3548 	rsp->flags  = cpu_to_le16(0);
3549 
3550 	return ptr - data;
3551 }
3552 
3553 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3554 				void *data, size_t size, u16 *result)
3555 {
3556 	struct l2cap_conf_req *req = data;
3557 	void *ptr = req->data;
3558 	void *endptr = data + size;
3559 	int type, olen;
3560 	unsigned long val;
3561 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3562 	struct l2cap_conf_efs efs;
3563 
3564 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3565 
3566 	while (len >= L2CAP_CONF_OPT_SIZE) {
3567 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3568 		if (len < 0)
3569 			break;
3570 
3571 		switch (type) {
3572 		case L2CAP_CONF_MTU:
3573 			if (olen != 2)
3574 				break;
3575 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3576 				*result = L2CAP_CONF_UNACCEPT;
3577 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3578 			} else
3579 				chan->imtu = val;
3580 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3581 					   endptr - ptr);
3582 			break;
3583 
3584 		case L2CAP_CONF_FLUSH_TO:
3585 			if (olen != 2)
3586 				break;
3587 			chan->flush_to = val;
3588 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3589 					   chan->flush_to, endptr - ptr);
3590 			break;
3591 
3592 		case L2CAP_CONF_RFC:
3593 			if (olen != sizeof(rfc))
3594 				break;
3595 			memcpy(&rfc, (void *)val, olen);
3596 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3597 			    rfc.mode != chan->mode)
3598 				return -ECONNREFUSED;
3599 			chan->fcs = 0;
3600 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3601 					   (unsigned long) &rfc, endptr - ptr);
3602 			break;
3603 
3604 		case L2CAP_CONF_EWS:
3605 			if (olen != 2)
3606 				break;
3607 			chan->ack_win = min_t(u16, val, chan->ack_win);
3608 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3609 					   chan->tx_win, endptr - ptr);
3610 			break;
3611 
3612 		case L2CAP_CONF_EFS:
3613 			if (olen != sizeof(efs))
3614 				break;
3615 			memcpy(&efs, (void *)val, olen);
3616 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3617 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3618 			    efs.stype != chan->local_stype)
3619 				return -ECONNREFUSED;
3620 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3621 					   (unsigned long) &efs, endptr - ptr);
3622 			break;
3623 
3624 		case L2CAP_CONF_FCS:
3625 			if (olen != 1)
3626 				break;
3627 			if (*result == L2CAP_CONF_PENDING)
3628 				if (val == L2CAP_FCS_NONE)
3629 					set_bit(CONF_RECV_NO_FCS,
3630 						&chan->conf_state);
3631 			break;
3632 		}
3633 	}
3634 
3635 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3636 		return -ECONNREFUSED;
3637 
3638 	chan->mode = rfc.mode;
3639 
3640 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3641 		switch (rfc.mode) {
3642 		case L2CAP_MODE_ERTM:
3643 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3644 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3645 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3646 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3647 				chan->ack_win = min_t(u16, chan->ack_win,
3648 						      rfc.txwin_size);
3649 
3650 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3651 				chan->local_msdu = le16_to_cpu(efs.msdu);
3652 				chan->local_sdu_itime =
3653 					le32_to_cpu(efs.sdu_itime);
3654 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3655 				chan->local_flush_to =
3656 					le32_to_cpu(efs.flush_to);
3657 			}
3658 			break;
3659 
3660 		case L2CAP_MODE_STREAMING:
3661 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3662 		}
3663 	}
3664 
3665 	req->dcid   = cpu_to_le16(chan->dcid);
3666 	req->flags  = cpu_to_le16(0);
3667 
3668 	return ptr - data;
3669 }
3670 
3671 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3672 				u16 result, u16 flags)
3673 {
3674 	struct l2cap_conf_rsp *rsp = data;
3675 	void *ptr = rsp->data;
3676 
3677 	BT_DBG("chan %p", chan);
3678 
3679 	rsp->scid   = cpu_to_le16(chan->dcid);
3680 	rsp->result = cpu_to_le16(result);
3681 	rsp->flags  = cpu_to_le16(flags);
3682 
3683 	return ptr - data;
3684 }
3685 
3686 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3687 {
3688 	struct l2cap_le_conn_rsp rsp;
3689 	struct l2cap_conn *conn = chan->conn;
3690 
3691 	BT_DBG("chan %p", chan);
3692 
3693 	rsp.dcid    = cpu_to_le16(chan->scid);
3694 	rsp.mtu     = cpu_to_le16(chan->imtu);
3695 	rsp.mps     = cpu_to_le16(chan->mps);
3696 	rsp.credits = cpu_to_le16(chan->rx_credits);
3697 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3698 
3699 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3700 		       &rsp);
3701 }
3702 
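/* Send the Connect Response (or Create Channel Response when the channel
 * sits on a high-speed link) that was deferred while waiting for the upper
 * layer to authorize the connection, then kick off configuration with a
 * first Configuration Request if one has not been sent yet.
 */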
3703 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3704 {
3705 	struct l2cap_conn_rsp rsp;
3706 	struct l2cap_conn *conn = chan->conn;
3707 	u8 buf[128];
3708 	u8 rsp_code;
3709 
3710 	rsp.scid   = cpu_to_le16(chan->dcid);
3711 	rsp.dcid   = cpu_to_le16(chan->scid);
3712 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3713 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3714 
3715 	if (chan->hs_hcon)
3716 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3717 	else
3718 		rsp_code = L2CAP_CONN_RSP;
3719 
3720 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3721 
3722 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3723 
3724 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3725 		return;
3726 
3727 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3728 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3729 	chan->num_conf_req++;
3730 }
3731 
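/* Pull the RFC and extended window size options out of a successful
 * Configuration Response and apply them to the channel: retransmission and
 * monitor timeouts, MPS and ack window for ERTM, MPS for streaming mode.
 */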
3732 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3733 {
3734 	int type, olen;
3735 	unsigned long val;
3736 	/* Use sane default values in case a misbehaving remote device
3737 	 * did not send an RFC or extended window size option.
3738 	 */
3739 	u16 txwin_ext = chan->ack_win;
3740 	struct l2cap_conf_rfc rfc = {
3741 		.mode = chan->mode,
3742 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3743 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3744 		.max_pdu_size = cpu_to_le16(chan->imtu),
3745 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3746 	};
3747 
3748 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3749 
3750 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3751 		return;
3752 
3753 	while (len >= L2CAP_CONF_OPT_SIZE) {
3754 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3755 		if (len < 0)
3756 			break;
3757 
3758 		switch (type) {
3759 		case L2CAP_CONF_RFC:
3760 			if (olen != sizeof(rfc))
3761 				break;
3762 			memcpy(&rfc, (void *)val, olen);
3763 			break;
3764 		case L2CAP_CONF_EWS:
3765 			if (olen != 2)
3766 				break;
3767 			txwin_ext = val;
3768 			break;
3769 		}
3770 	}
3771 
3772 	switch (rfc.mode) {
3773 	case L2CAP_MODE_ERTM:
3774 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3775 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3776 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3777 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3778 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3779 		else
3780 			chan->ack_win = min_t(u16, chan->ack_win,
3781 					      rfc.txwin_size);
3782 		break;
3783 	case L2CAP_MODE_STREAMING:
3784 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3785 	}
3786 }
3787 
3788 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3789 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3790 				    u8 *data)
3791 {
3792 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3793 
3794 	if (cmd_len < sizeof(*rej))
3795 		return -EPROTO;
3796 
3797 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3798 		return 0;
3799 
3800 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3801 	    cmd->ident == conn->info_ident) {
3802 		cancel_delayed_work(&conn->info_timer);
3803 
3804 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3805 		conn->info_ident = 0;
3806 
3807 		l2cap_conn_start(conn);
3808 	}
3809 
3810 	return 0;
3811 }
3812 
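/* Handle an incoming Connection Request (or Create Channel Request when
 * @amp_id refers to an AMP controller): look up a listening channel for the
 * PSM, run the security checks, create and register the new channel and
 * send the response.  A pending result with "no info" status also triggers
 * an Information Request for the remote feature mask.
 */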
3813 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3814 					struct l2cap_cmd_hdr *cmd,
3815 					u8 *data, u8 rsp_code, u8 amp_id)
3816 {
3817 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3818 	struct l2cap_conn_rsp rsp;
3819 	struct l2cap_chan *chan = NULL, *pchan;
3820 	int result, status = L2CAP_CS_NO_INFO;
3821 
3822 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3823 	__le16 psm = req->psm;
3824 
3825 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3826 
3827 	/* Check if we have socket listening on psm */
3828 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3829 					 &conn->hcon->dst, ACL_LINK);
3830 	if (!pchan) {
3831 		result = L2CAP_CR_BAD_PSM;
3832 		goto sendresp;
3833 	}
3834 
3835 	mutex_lock(&conn->chan_lock);
3836 	l2cap_chan_lock(pchan);
3837 
3838 	/* Check if the ACL is secure enough (if not SDP) */
3839 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3840 	    !hci_conn_check_link_mode(conn->hcon)) {
3841 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3842 		result = L2CAP_CR_SEC_BLOCK;
3843 		goto response;
3844 	}
3845 
3846 	result = L2CAP_CR_NO_MEM;
3847 
3848 	/* Check if we already have channel with that dcid */
3849 	if (__l2cap_get_chan_by_dcid(conn, scid))
3850 		goto response;
3851 
3852 	chan = pchan->ops->new_connection(pchan);
3853 	if (!chan)
3854 		goto response;
3855 
3856 	/* For certain devices (e.g. a HID mouse), support for authentication,
3857 	 * pairing and bonding is optional. For such devices, in order to avoid
3858 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3859 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3860 	 */
3861 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3862 
3863 	bacpy(&chan->src, &conn->hcon->src);
3864 	bacpy(&chan->dst, &conn->hcon->dst);
3865 	chan->src_type = bdaddr_src_type(conn->hcon);
3866 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3867 	chan->psm  = psm;
3868 	chan->dcid = scid;
3869 	chan->local_amp_id = amp_id;
3870 
3871 	__l2cap_chan_add(conn, chan);
3872 
3873 	dcid = chan->scid;
3874 
3875 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3876 
3877 	chan->ident = cmd->ident;
3878 
3879 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3880 		if (l2cap_chan_check_security(chan, false)) {
3881 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3882 				l2cap_state_change(chan, BT_CONNECT2);
3883 				result = L2CAP_CR_PEND;
3884 				status = L2CAP_CS_AUTHOR_PEND;
3885 				chan->ops->defer(chan);
3886 			} else {
3887 				/* Force pending result for AMP controllers.
3888 				 * The connection will succeed after the
3889 				 * physical link is up.
3890 				 */
3891 				if (amp_id == AMP_ID_BREDR) {
3892 					l2cap_state_change(chan, BT_CONFIG);
3893 					result = L2CAP_CR_SUCCESS;
3894 				} else {
3895 					l2cap_state_change(chan, BT_CONNECT2);
3896 					result = L2CAP_CR_PEND;
3897 				}
3898 				status = L2CAP_CS_NO_INFO;
3899 			}
3900 		} else {
3901 			l2cap_state_change(chan, BT_CONNECT2);
3902 			result = L2CAP_CR_PEND;
3903 			status = L2CAP_CS_AUTHEN_PEND;
3904 		}
3905 	} else {
3906 		l2cap_state_change(chan, BT_CONNECT2);
3907 		result = L2CAP_CR_PEND;
3908 		status = L2CAP_CS_NO_INFO;
3909 	}
3910 
3911 response:
3912 	l2cap_chan_unlock(pchan);
3913 	mutex_unlock(&conn->chan_lock);
3914 	l2cap_chan_put(pchan);
3915 
3916 sendresp:
3917 	rsp.scid   = cpu_to_le16(scid);
3918 	rsp.dcid   = cpu_to_le16(dcid);
3919 	rsp.result = cpu_to_le16(result);
3920 	rsp.status = cpu_to_le16(status);
3921 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3922 
3923 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3924 		struct l2cap_info_req info;
3925 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3926 
3927 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3928 		conn->info_ident = l2cap_get_ident(conn);
3929 
3930 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3931 
3932 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3933 			       sizeof(info), &info);
3934 	}
3935 
3936 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3937 	    result == L2CAP_CR_SUCCESS) {
3938 		u8 buf[128];
3939 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3940 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3941 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3942 		chan->num_conf_req++;
3943 	}
3944 
3945 	return chan;
3946 }
3947 
3948 static int l2cap_connect_req(struct l2cap_conn *conn,
3949 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3950 {
3951 	struct hci_dev *hdev = conn->hcon->hdev;
3952 	struct hci_conn *hcon = conn->hcon;
3953 
3954 	if (cmd_len < sizeof(struct l2cap_conn_req))
3955 		return -EPROTO;
3956 
3957 	hci_dev_lock(hdev);
3958 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3959 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3960 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3961 	hci_dev_unlock(hdev);
3962 
3963 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3964 	return 0;
3965 }
3966 
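/* Handle a Connection Response or Create Channel Response for an outgoing
 * connection.  On success the channel moves to BT_CONFIG and the first
 * Configuration Request is sent; a pending result just marks the channel,
 * and any other result tears it down.
 */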
3967 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3968 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3969 				    u8 *data)
3970 {
3971 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3972 	u16 scid, dcid, result, status;
3973 	struct l2cap_chan *chan;
3974 	u8 req[128];
3975 	int err;
3976 
3977 	if (cmd_len < sizeof(*rsp))
3978 		return -EPROTO;
3979 
3980 	scid   = __le16_to_cpu(rsp->scid);
3981 	dcid   = __le16_to_cpu(rsp->dcid);
3982 	result = __le16_to_cpu(rsp->result);
3983 	status = __le16_to_cpu(rsp->status);
3984 
3985 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3986 	       dcid, scid, result, status);
3987 
3988 	mutex_lock(&conn->chan_lock);
3989 
3990 	if (scid) {
3991 		chan = __l2cap_get_chan_by_scid(conn, scid);
3992 		if (!chan) {
3993 			err = -EBADSLT;
3994 			goto unlock;
3995 		}
3996 	} else {
3997 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3998 		if (!chan) {
3999 			err = -EBADSLT;
4000 			goto unlock;
4001 		}
4002 	}
4003 
4004 	err = 0;
4005 
4006 	l2cap_chan_lock(chan);
4007 
4008 	switch (result) {
4009 	case L2CAP_CR_SUCCESS:
4010 		l2cap_state_change(chan, BT_CONFIG);
4011 		chan->ident = 0;
4012 		chan->dcid = dcid;
4013 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4014 
4015 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4016 			break;
4017 
4018 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4019 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4020 		chan->num_conf_req++;
4021 		break;
4022 
4023 	case L2CAP_CR_PEND:
4024 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4025 		break;
4026 
4027 	default:
4028 		l2cap_chan_del(chan, ECONNREFUSED);
4029 		break;
4030 	}
4031 
4032 	l2cap_chan_unlock(chan);
4033 
4034 unlock:
4035 	mutex_unlock(&conn->chan_lock);
4036 
4037 	return err;
4038 }
4039 
4040 static inline void set_default_fcs(struct l2cap_chan *chan)
4041 {
4042 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4043 	 * sides request it.
4044 	 */
4045 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4046 		chan->fcs = L2CAP_FCS_NONE;
4047 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4048 		chan->fcs = L2CAP_FCS_CRC16;
4049 }
4050 
4051 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4052 				    u8 ident, u16 flags)
4053 {
4054 	struct l2cap_conn *conn = chan->conn;
4055 
4056 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4057 	       flags);
4058 
4059 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4060 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4061 
4062 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4063 		       l2cap_build_conf_rsp(chan, data,
4064 					    L2CAP_CONF_SUCCESS, flags), data);
4065 }
4066 
4067 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4068 				   u16 scid, u16 dcid)
4069 {
4070 	struct l2cap_cmd_rej_cid rej;
4071 
4072 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4073 	rej.scid = __cpu_to_le16(scid);
4074 	rej.dcid = __cpu_to_le16(dcid);
4075 
4076 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4077 }
4078 
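/* Handle an incoming Configuration Request.  Option data is accumulated in
 * chan->conf_req across continuation segments; once the request is complete
 * it is parsed, a Configuration Response is sent, and the channel is made
 * ready (or ERTM initialized) when both directions are configured.
 */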
4079 static inline int l2cap_config_req(struct l2cap_conn *conn,
4080 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4081 				   u8 *data)
4082 {
4083 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4084 	u16 dcid, flags;
4085 	u8 rsp[64];
4086 	struct l2cap_chan *chan;
4087 	int len, err = 0;
4088 
4089 	if (cmd_len < sizeof(*req))
4090 		return -EPROTO;
4091 
4092 	dcid  = __le16_to_cpu(req->dcid);
4093 	flags = __le16_to_cpu(req->flags);
4094 
4095 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4096 
4097 	chan = l2cap_get_chan_by_scid(conn, dcid);
4098 	if (!chan) {
4099 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4100 		return 0;
4101 	}
4102 
4103 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4104 	    chan->state != BT_CONNECTED) {
4105 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4106 				       chan->dcid);
4107 		goto unlock;
4108 	}
4109 
4110 	/* Reject if config buffer is too small. */
4111 	len = cmd_len - sizeof(*req);
4112 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4113 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4114 			       l2cap_build_conf_rsp(chan, rsp,
4115 			       L2CAP_CONF_REJECT, flags), rsp);
4116 		goto unlock;
4117 	}
4118 
4119 	/* Store config. */
4120 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4121 	chan->conf_len += len;
4122 
4123 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4124 		/* Incomplete config. Send empty response. */
4125 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4126 			       l2cap_build_conf_rsp(chan, rsp,
4127 			       L2CAP_CONF_SUCCESS, flags), rsp);
4128 		goto unlock;
4129 	}
4130 
4131 	/* Complete config. */
4132 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4133 	if (len < 0) {
4134 		l2cap_send_disconn_req(chan, ECONNRESET);
4135 		goto unlock;
4136 	}
4137 
4138 	chan->ident = cmd->ident;
4139 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4140 	chan->num_conf_rsp++;
4141 
4142 	/* Reset config buffer. */
4143 	chan->conf_len = 0;
4144 
4145 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4146 		goto unlock;
4147 
4148 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4149 		set_default_fcs(chan);
4150 
4151 		if (chan->mode == L2CAP_MODE_ERTM ||
4152 		    chan->mode == L2CAP_MODE_STREAMING)
4153 			err = l2cap_ertm_init(chan);
4154 
4155 		if (err < 0)
4156 			l2cap_send_disconn_req(chan, -err);
4157 		else
4158 			l2cap_chan_ready(chan);
4159 
4160 		goto unlock;
4161 	}
4162 
4163 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4164 		u8 buf[64];
4165 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4166 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4167 		chan->num_conf_req++;
4168 	}
4169 
4170 	/* Got a Conf Rsp PENDING from the remote side; assume we also sent
4171 	 * a Conf Rsp PENDING in the code above. */
4172 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4173 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4174 
4175 		/* check compatibility */
4176 
4177 		/* Send rsp for BR/EDR channel */
4178 		if (!chan->hs_hcon)
4179 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4180 		else
4181 			chan->ident = cmd->ident;
4182 	}
4183 
4184 unlock:
4185 	l2cap_chan_unlock(chan);
4186 	return err;
4187 }
4188 
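/* Handle a Configuration Response.  Success applies the negotiated RFC
 * settings, PENDING and UNACCEPT results lead to further negotiation, and
 * anything else disconnects the channel.  Once both input and output paths
 * are configured the channel becomes ready.
 */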
4189 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4190 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4191 				   u8 *data)
4192 {
4193 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4194 	u16 scid, flags, result;
4195 	struct l2cap_chan *chan;
4196 	int len = cmd_len - sizeof(*rsp);
4197 	int err = 0;
4198 
4199 	if (cmd_len < sizeof(*rsp))
4200 		return -EPROTO;
4201 
4202 	scid   = __le16_to_cpu(rsp->scid);
4203 	flags  = __le16_to_cpu(rsp->flags);
4204 	result = __le16_to_cpu(rsp->result);
4205 
4206 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4207 	       result, len);
4208 
4209 	chan = l2cap_get_chan_by_scid(conn, scid);
4210 	if (!chan)
4211 		return 0;
4212 
4213 	switch (result) {
4214 	case L2CAP_CONF_SUCCESS:
4215 		l2cap_conf_rfc_get(chan, rsp->data, len);
4216 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4217 		break;
4218 
4219 	case L2CAP_CONF_PENDING:
4220 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4221 
4222 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4223 			char buf[64];
4224 
4225 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4226 						   buf, sizeof(buf), &result);
4227 			if (len < 0) {
4228 				l2cap_send_disconn_req(chan, ECONNRESET);
4229 				goto done;
4230 			}
4231 
4232 			if (!chan->hs_hcon) {
4233 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4234 							0);
4235 			} else {
4236 				if (l2cap_check_efs(chan)) {
4237 					amp_create_logical_link(chan);
4238 					chan->ident = cmd->ident;
4239 				}
4240 			}
4241 		}
4242 		goto done;
4243 
4244 	case L2CAP_CONF_UNACCEPT:
4245 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4246 			char req[64];
4247 
4248 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4249 				l2cap_send_disconn_req(chan, ECONNRESET);
4250 				goto done;
4251 			}
4252 
4253 			/* throw out any old stored conf requests */
4254 			result = L2CAP_CONF_SUCCESS;
4255 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4256 						   req, sizeof(req), &result);
4257 			if (len < 0) {
4258 				l2cap_send_disconn_req(chan, ECONNRESET);
4259 				goto done;
4260 			}
4261 
4262 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4263 				       L2CAP_CONF_REQ, len, req);
4264 			chan->num_conf_req++;
4265 			if (result != L2CAP_CONF_SUCCESS)
4266 				goto done;
4267 			break;
4268 		}
4269 
4270 	default:
4271 		l2cap_chan_set_err(chan, ECONNRESET);
4272 
4273 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4274 		l2cap_send_disconn_req(chan, ECONNRESET);
4275 		goto done;
4276 	}
4277 
4278 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4279 		goto done;
4280 
4281 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4282 
4283 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4284 		set_default_fcs(chan);
4285 
4286 		if (chan->mode == L2CAP_MODE_ERTM ||
4287 		    chan->mode == L2CAP_MODE_STREAMING)
4288 			err = l2cap_ertm_init(chan);
4289 
4290 		if (err < 0)
4291 			l2cap_send_disconn_req(chan, -err);
4292 		else
4293 			l2cap_chan_ready(chan);
4294 	}
4295 
4296 done:
4297 	l2cap_chan_unlock(chan);
4298 	return err;
4299 }
4300 
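/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, then shut down and remove the channel.
 */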
4301 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4302 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4303 				       u8 *data)
4304 {
4305 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4306 	struct l2cap_disconn_rsp rsp;
4307 	u16 dcid, scid;
4308 	struct l2cap_chan *chan;
4309 
4310 	if (cmd_len != sizeof(*req))
4311 		return -EPROTO;
4312 
4313 	scid = __le16_to_cpu(req->scid);
4314 	dcid = __le16_to_cpu(req->dcid);
4315 
4316 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4317 
4318 	mutex_lock(&conn->chan_lock);
4319 
4320 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4321 	if (!chan) {
4322 		mutex_unlock(&conn->chan_lock);
4323 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4324 		return 0;
4325 	}
4326 
4327 	l2cap_chan_hold(chan);
4328 	l2cap_chan_lock(chan);
4329 
4330 	rsp.dcid = cpu_to_le16(chan->scid);
4331 	rsp.scid = cpu_to_le16(chan->dcid);
4332 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4333 
4334 	chan->ops->set_shutdown(chan);
4335 
4336 	l2cap_chan_del(chan, ECONNRESET);
4337 
4338 	chan->ops->close(chan);
4339 
4340 	l2cap_chan_unlock(chan);
4341 	l2cap_chan_put(chan);
4342 
4343 	mutex_unlock(&conn->chan_lock);
4344 
4345 	return 0;
4346 }
4347 
4348 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4349 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4350 				       u8 *data)
4351 {
4352 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4353 	u16 dcid, scid;
4354 	struct l2cap_chan *chan;
4355 
4356 	if (cmd_len != sizeof(*rsp))
4357 		return -EPROTO;
4358 
4359 	scid = __le16_to_cpu(rsp->scid);
4360 	dcid = __le16_to_cpu(rsp->dcid);
4361 
4362 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4363 
4364 	mutex_lock(&conn->chan_lock);
4365 
4366 	chan = __l2cap_get_chan_by_scid(conn, scid);
4367 	if (!chan) {
4368 		mutex_unlock(&conn->chan_lock);
4369 		return 0;
4370 	}
4371 
4372 	l2cap_chan_hold(chan);
4373 	l2cap_chan_lock(chan);
4374 
4375 	if (chan->state != BT_DISCONN) {
4376 		l2cap_chan_unlock(chan);
4377 		l2cap_chan_put(chan);
4378 		mutex_unlock(&conn->chan_lock);
4379 		return 0;
4380 	}
4381 
4382 	l2cap_chan_del(chan, 0);
4383 
4384 	chan->ops->close(chan);
4385 
4386 	l2cap_chan_unlock(chan);
4387 	l2cap_chan_put(chan);
4388 
4389 	mutex_unlock(&conn->chan_lock);
4390 
4391 	return 0;
4392 }
4393 
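/* Handle an Information Request.  Feature mask and fixed channel queries
 * are answered from the local capabilities; any other type gets a
 * "not supported" response.
 */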
4394 static inline int l2cap_information_req(struct l2cap_conn *conn,
4395 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4396 					u8 *data)
4397 {
4398 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4399 	u16 type;
4400 
4401 	if (cmd_len != sizeof(*req))
4402 		return -EPROTO;
4403 
4404 	type = __le16_to_cpu(req->type);
4405 
4406 	BT_DBG("type 0x%4.4x", type);
4407 
4408 	if (type == L2CAP_IT_FEAT_MASK) {
4409 		u8 buf[8];
4410 		u32 feat_mask = l2cap_feat_mask;
4411 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4412 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4413 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4414 		if (!disable_ertm)
4415 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4416 				| L2CAP_FEAT_FCS;
4417 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4418 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4419 				| L2CAP_FEAT_EXT_WINDOW;
4420 
4421 		put_unaligned_le32(feat_mask, rsp->data);
4422 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4423 			       buf);
4424 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4425 		u8 buf[12];
4426 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4427 
4428 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4429 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4430 		rsp->data[0] = conn->local_fixed_chan;
4431 		memset(rsp->data + 1, 0, 7);
4432 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4433 			       buf);
4434 	} else {
4435 		struct l2cap_info_rsp rsp;
4436 		rsp.type   = cpu_to_le16(type);
4437 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4438 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4439 			       &rsp);
4440 	}
4441 
4442 	return 0;
4443 }
4444 
4445 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4446 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4447 					u8 *data)
4448 {
4449 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4450 	u16 type, result;
4451 
4452 	if (cmd_len < sizeof(*rsp))
4453 		return -EPROTO;
4454 
4455 	type   = __le16_to_cpu(rsp->type);
4456 	result = __le16_to_cpu(rsp->result);
4457 
4458 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4459 
4460 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4461 	if (cmd->ident != conn->info_ident ||
4462 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4463 		return 0;
4464 
4465 	cancel_delayed_work(&conn->info_timer);
4466 
4467 	if (result != L2CAP_IR_SUCCESS) {
4468 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4469 		conn->info_ident = 0;
4470 
4471 		l2cap_conn_start(conn);
4472 
4473 		return 0;
4474 	}
4475 
4476 	switch (type) {
4477 	case L2CAP_IT_FEAT_MASK:
4478 		conn->feat_mask = get_unaligned_le32(rsp->data);
4479 
4480 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4481 			struct l2cap_info_req req;
4482 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4483 
4484 			conn->info_ident = l2cap_get_ident(conn);
4485 
4486 			l2cap_send_cmd(conn, conn->info_ident,
4487 				       L2CAP_INFO_REQ, sizeof(req), &req);
4488 		} else {
4489 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4490 			conn->info_ident = 0;
4491 
4492 			l2cap_conn_start(conn);
4493 		}
4494 		break;
4495 
4496 	case L2CAP_IT_FIXED_CHAN:
4497 		conn->remote_fixed_chan = rsp->data[0];
4498 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4499 		conn->info_ident = 0;
4500 
4501 		l2cap_conn_start(conn);
4502 		break;
4503 	}
4504 
4505 	return 0;
4506 }
4507 
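/* Handle a Create Channel Request (AMP).  Controller id 0 falls back to a
 * plain BR/EDR connection; otherwise the AMP controller id is validated and
 * the new channel is tied to the corresponding high-speed link.
 */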
4508 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4509 				    struct l2cap_cmd_hdr *cmd,
4510 				    u16 cmd_len, void *data)
4511 {
4512 	struct l2cap_create_chan_req *req = data;
4513 	struct l2cap_create_chan_rsp rsp;
4514 	struct l2cap_chan *chan;
4515 	struct hci_dev *hdev;
4516 	u16 psm, scid;
4517 
4518 	if (cmd_len != sizeof(*req))
4519 		return -EPROTO;
4520 
4521 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4522 		return -EINVAL;
4523 
4524 	psm = le16_to_cpu(req->psm);
4525 	scid = le16_to_cpu(req->scid);
4526 
4527 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4528 
4529 	/* For controller id 0 make BR/EDR connection */
4530 	if (req->amp_id == AMP_ID_BREDR) {
4531 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4532 			      req->amp_id);
4533 		return 0;
4534 	}
4535 
4536 	/* Validate AMP controller id */
4537 	hdev = hci_dev_get(req->amp_id);
4538 	if (!hdev)
4539 		goto error;
4540 
4541 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4542 		hci_dev_put(hdev);
4543 		goto error;
4544 	}
4545 
4546 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4547 			     req->amp_id);
4548 	if (chan) {
4549 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4550 		struct hci_conn *hs_hcon;
4551 
4552 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4553 						  &conn->hcon->dst);
4554 		if (!hs_hcon) {
4555 			hci_dev_put(hdev);
4556 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4557 					       chan->dcid);
4558 			return 0;
4559 		}
4560 
4561 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4562 
4563 		mgr->bredr_chan = chan;
4564 		chan->hs_hcon = hs_hcon;
4565 		chan->fcs = L2CAP_FCS_NONE;
4566 		conn->mtu = hdev->block_mtu;
4567 	}
4568 
4569 	hci_dev_put(hdev);
4570 
4571 	return 0;
4572 
4573 error:
4574 	rsp.dcid = 0;
4575 	rsp.scid = cpu_to_le16(scid);
4576 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4577 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4578 
4579 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4580 		       sizeof(rsp), &rsp);
4581 
4582 	return 0;
4583 }
4584 
4585 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4586 {
4587 	struct l2cap_move_chan_req req;
4588 	u8 ident;
4589 
4590 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4591 
4592 	ident = l2cap_get_ident(chan->conn);
4593 	chan->ident = ident;
4594 
4595 	req.icid = cpu_to_le16(chan->scid);
4596 	req.dest_amp_id = dest_amp_id;
4597 
4598 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4599 		       &req);
4600 
4601 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4602 }
4603 
4604 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4605 {
4606 	struct l2cap_move_chan_rsp rsp;
4607 
4608 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4609 
4610 	rsp.icid = cpu_to_le16(chan->dcid);
4611 	rsp.result = cpu_to_le16(result);
4612 
4613 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4614 		       sizeof(rsp), &rsp);
4615 }
4616 
4617 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4618 {
4619 	struct l2cap_move_chan_cfm cfm;
4620 
4621 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4622 
4623 	chan->ident = l2cap_get_ident(chan->conn);
4624 
4625 	cfm.icid = cpu_to_le16(chan->scid);
4626 	cfm.result = cpu_to_le16(result);
4627 
4628 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4629 		       sizeof(cfm), &cfm);
4630 
4631 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4632 }
4633 
4634 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4635 {
4636 	struct l2cap_move_chan_cfm cfm;
4637 
4638 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4639 
4640 	cfm.icid = cpu_to_le16(icid);
4641 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4642 
4643 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4644 		       sizeof(cfm), &cfm);
4645 }
4646 
4647 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4648 					 u16 icid)
4649 {
4650 	struct l2cap_move_chan_cfm_rsp rsp;
4651 
4652 	BT_DBG("icid 0x%4.4x", icid);
4653 
4654 	rsp.icid = cpu_to_le16(icid);
4655 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4656 }
4657 
4658 static void __release_logical_link(struct l2cap_chan *chan)
4659 {
4660 	chan->hs_hchan = NULL;
4661 	chan->hs_hcon = NULL;
4662 
4663 	/* Placeholder - release the logical link */
4664 }
4665 
4666 static void l2cap_logical_fail(struct l2cap_chan *chan)
4667 {
4668 	/* Logical link setup failed */
4669 	if (chan->state != BT_CONNECTED) {
4670 		/* Create channel failure, disconnect */
4671 		l2cap_send_disconn_req(chan, ECONNRESET);
4672 		return;
4673 	}
4674 
4675 	switch (chan->move_role) {
4676 	case L2CAP_MOVE_ROLE_RESPONDER:
4677 		l2cap_move_done(chan);
4678 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4679 		break;
4680 	case L2CAP_MOVE_ROLE_INITIATOR:
4681 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4682 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4683 			/* Remote has only sent pending or
4684 			 * success responses, clean up
4685 			 */
4686 			l2cap_move_done(chan);
4687 		}
4688 
4689 		/* Other amp move states imply that the move
4690 		 * has already aborted
4691 		 */
4692 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4693 		break;
4694 	}
4695 }
4696 
4697 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4698 					struct hci_chan *hchan)
4699 {
4700 	struct l2cap_conf_rsp rsp;
4701 
4702 	chan->hs_hchan = hchan;
4703 	chan->hs_hcon->l2cap_data = chan->conn;
4704 
4705 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4706 
4707 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4708 		int err;
4709 
4710 		set_default_fcs(chan);
4711 
4712 		err = l2cap_ertm_init(chan);
4713 		if (err < 0)
4714 			l2cap_send_disconn_req(chan, -err);
4715 		else
4716 			l2cap_chan_ready(chan);
4717 	}
4718 }
4719 
4720 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4721 				      struct hci_chan *hchan)
4722 {
4723 	chan->hs_hcon = hchan->conn;
4724 	chan->hs_hcon->l2cap_data = chan->conn;
4725 
4726 	BT_DBG("move_state %d", chan->move_state);
4727 
4728 	switch (chan->move_state) {
4729 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4730 		/* Move confirm will be sent after a success
4731 		 * response is received
4732 		 */
4733 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4734 		break;
4735 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4736 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4737 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4738 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4739 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4740 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4741 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4742 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4743 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4744 		}
4745 		break;
4746 	default:
4747 		/* Move was not in expected state, release the logical link */
4748 		__release_logical_link(chan);
4749 
4750 		chan->move_state = L2CAP_MOVE_STABLE;
4751 	}
4752 }
4753 
4754 /* Call with chan locked */
4755 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4756 		       u8 status)
4757 {
4758 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4759 
4760 	if (status) {
4761 		l2cap_logical_fail(chan);
4762 		__release_logical_link(chan);
4763 		return;
4764 	}
4765 
4766 	if (chan->state != BT_CONNECTED) {
4767 		/* Ignore logical link if channel is on BR/EDR */
4768 		if (chan->local_amp_id != AMP_ID_BREDR)
4769 			l2cap_logical_finish_create(chan, hchan);
4770 	} else {
4771 		l2cap_logical_finish_move(chan, hchan);
4772 	}
4773 }
4774 
4775 void l2cap_move_start(struct l2cap_chan *chan)
4776 {
4777 	BT_DBG("chan %p", chan);
4778 
4779 	if (chan->local_amp_id == AMP_ID_BREDR) {
4780 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4781 			return;
4782 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4783 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4784 		/* Placeholder - start physical link setup */
4785 	} else {
4786 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4787 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4788 		chan->move_id = 0;
4789 		l2cap_move_setup(chan);
4790 		l2cap_send_move_chan_req(chan, 0);
4791 	}
4792 }
4793 
4794 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4795 			    u8 local_amp_id, u8 remote_amp_id)
4796 {
4797 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4798 	       local_amp_id, remote_amp_id);
4799 
4800 	chan->fcs = L2CAP_FCS_NONE;
4801 
4802 	/* Outgoing channel on AMP */
4803 	if (chan->state == BT_CONNECT) {
4804 		if (result == L2CAP_CR_SUCCESS) {
4805 			chan->local_amp_id = local_amp_id;
4806 			l2cap_send_create_chan_req(chan, remote_amp_id);
4807 		} else {
4808 			/* Revert to BR/EDR connect */
4809 			l2cap_send_conn_req(chan);
4810 		}
4811 
4812 		return;
4813 	}
4814 
4815 	/* Incoming channel on AMP */
4816 	if (__l2cap_no_conn_pending(chan)) {
4817 		struct l2cap_conn_rsp rsp;
4818 		char buf[128];
4819 		rsp.scid = cpu_to_le16(chan->dcid);
4820 		rsp.dcid = cpu_to_le16(chan->scid);
4821 
4822 		if (result == L2CAP_CR_SUCCESS) {
4823 			/* Send successful response */
4824 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4825 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4826 		} else {
4827 			/* Send negative response */
4828 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4829 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4830 		}
4831 
4832 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4833 			       sizeof(rsp), &rsp);
4834 
4835 		if (result == L2CAP_CR_SUCCESS) {
4836 			l2cap_state_change(chan, BT_CONFIG);
4837 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4838 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4839 				       L2CAP_CONF_REQ,
4840 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4841 			chan->num_conf_req++;
4842 		}
4843 	}
4844 }
4845 
4846 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4847 				   u8 remote_amp_id)
4848 {
4849 	l2cap_move_setup(chan);
4850 	chan->move_id = local_amp_id;
4851 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4852 
4853 	l2cap_send_move_chan_req(chan, remote_amp_id);
4854 }
4855 
4856 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4857 {
4858 	struct hci_chan *hchan = NULL;
4859 
4860 	/* Placeholder - get hci_chan for logical link */
4861 
4862 	if (hchan) {
4863 		if (hchan->state == BT_CONNECTED) {
4864 			/* Logical link is ready to go */
4865 			chan->hs_hcon = hchan->conn;
4866 			chan->hs_hcon->l2cap_data = chan->conn;
4867 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4868 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4869 
4870 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4871 		} else {
4872 			/* Wait for logical link to be ready */
4873 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4874 		}
4875 	} else {
4876 		/* Logical link not available */
4877 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4878 	}
4879 }
4880 
4881 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4882 {
4883 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4884 		u8 rsp_result;
4885 		if (result == -EINVAL)
4886 			rsp_result = L2CAP_MR_BAD_ID;
4887 		else
4888 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4889 
4890 		l2cap_send_move_chan_rsp(chan, rsp_result);
4891 	}
4892 
4893 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4894 	chan->move_state = L2CAP_MOVE_STABLE;
4895 
4896 	/* Restart data transmission */
4897 	l2cap_ertm_send(chan);
4898 }
4899 
4900 /* Invoke with locked chan */
4901 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4902 {
4903 	u8 local_amp_id = chan->local_amp_id;
4904 	u8 remote_amp_id = chan->remote_amp_id;
4905 
4906 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4907 	       chan, result, local_amp_id, remote_amp_id);
4908 
4909 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4910 		return;
4911 
4912 	if (chan->state != BT_CONNECTED) {
4913 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4914 	} else if (result != L2CAP_MR_SUCCESS) {
4915 		l2cap_do_move_cancel(chan, result);
4916 	} else {
4917 		switch (chan->move_role) {
4918 		case L2CAP_MOVE_ROLE_INITIATOR:
4919 			l2cap_do_move_initiate(chan, local_amp_id,
4920 					       remote_amp_id);
4921 			break;
4922 		case L2CAP_MOVE_ROLE_RESPONDER:
4923 			l2cap_do_move_respond(chan, result);
4924 			break;
4925 		default:
4926 			l2cap_do_move_cancel(chan, result);
4927 			break;
4928 		}
4929 	}
4930 }
4931 
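/* Handle a Move Channel Request.  The channel and destination controller
 * are validated, move collisions are detected, and the response carries
 * success, pending or an error result depending on the current state.
 */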
4932 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4933 					 struct l2cap_cmd_hdr *cmd,
4934 					 u16 cmd_len, void *data)
4935 {
4936 	struct l2cap_move_chan_req *req = data;
4937 	struct l2cap_move_chan_rsp rsp;
4938 	struct l2cap_chan *chan;
4939 	u16 icid = 0;
4940 	u16 result = L2CAP_MR_NOT_ALLOWED;
4941 
4942 	if (cmd_len != sizeof(*req))
4943 		return -EPROTO;
4944 
4945 	icid = le16_to_cpu(req->icid);
4946 
4947 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4948 
4949 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4950 		return -EINVAL;
4951 
4952 	chan = l2cap_get_chan_by_dcid(conn, icid);
4953 	if (!chan) {
4954 		rsp.icid = cpu_to_le16(icid);
4955 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4956 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4957 			       sizeof(rsp), &rsp);
4958 		return 0;
4959 	}
4960 
4961 	chan->ident = cmd->ident;
4962 
4963 	if (chan->scid < L2CAP_CID_DYN_START ||
4964 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4965 	    (chan->mode != L2CAP_MODE_ERTM &&
4966 	     chan->mode != L2CAP_MODE_STREAMING)) {
4967 		result = L2CAP_MR_NOT_ALLOWED;
4968 		goto send_move_response;
4969 	}
4970 
4971 	if (chan->local_amp_id == req->dest_amp_id) {
4972 		result = L2CAP_MR_SAME_ID;
4973 		goto send_move_response;
4974 	}
4975 
4976 	if (req->dest_amp_id != AMP_ID_BREDR) {
4977 		struct hci_dev *hdev;
4978 		hdev = hci_dev_get(req->dest_amp_id);
4979 		if (!hdev || hdev->dev_type != HCI_AMP ||
4980 		    !test_bit(HCI_UP, &hdev->flags)) {
4981 			if (hdev)
4982 				hci_dev_put(hdev);
4983 
4984 			result = L2CAP_MR_BAD_ID;
4985 			goto send_move_response;
4986 		}
4987 		hci_dev_put(hdev);
4988 	}
4989 
4990 	/* Detect a move collision.  Only send a collision response
4991 	 * if this side has "lost", otherwise proceed with the move.
4992 	 * The winner has the larger bd_addr.
4993 	 */
4994 	if ((__chan_is_moving(chan) ||
4995 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4996 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4997 		result = L2CAP_MR_COLLISION;
4998 		goto send_move_response;
4999 	}
5000 
5001 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5002 	l2cap_move_setup(chan);
5003 	chan->move_id = req->dest_amp_id;
5004 	icid = chan->dcid;
5005 
5006 	if (req->dest_amp_id == AMP_ID_BREDR) {
5007 		/* Moving to BR/EDR */
5008 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5009 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5010 			result = L2CAP_MR_PEND;
5011 		} else {
5012 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5013 			result = L2CAP_MR_SUCCESS;
5014 		}
5015 	} else {
5016 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5017 		/* Placeholder - uncomment when amp functions are available */
5018 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5019 		result = L2CAP_MR_PEND;
5020 	}
5021 
5022 send_move_response:
5023 	l2cap_send_move_chan_rsp(chan, result);
5024 
5025 	l2cap_chan_unlock(chan);
5026 
5027 	return 0;
5028 }
5029 
5030 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5031 {
5032 	struct l2cap_chan *chan;
5033 	struct hci_chan *hchan = NULL;
5034 
5035 	chan = l2cap_get_chan_by_scid(conn, icid);
5036 	if (!chan) {
5037 		l2cap_send_move_chan_cfm_icid(conn, icid);
5038 		return;
5039 	}
5040 
5041 	__clear_chan_timer(chan);
5042 	if (result == L2CAP_MR_PEND)
5043 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5044 
5045 	switch (chan->move_state) {
5046 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5047 		/* Move confirm will be sent when logical link
5048 		 * is complete.
5049 		 */
5050 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5051 		break;
5052 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5053 		if (result == L2CAP_MR_PEND) {
5054 			break;
5055 		} else if (test_bit(CONN_LOCAL_BUSY,
5056 				    &chan->conn_state)) {
5057 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5058 		} else {
5059 			/* Logical link is up or moving to BR/EDR,
5060 			 * proceed with move
5061 			 */
5062 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5063 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5064 		}
5065 		break;
5066 	case L2CAP_MOVE_WAIT_RSP:
5067 		/* Moving to AMP */
5068 		if (result == L2CAP_MR_SUCCESS) {
5069 			/* Remote is ready, send confirm immediately
5070 			 * after logical link is ready
5071 			 */
5072 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5073 		} else {
5074 			/* Both logical link and move success
5075 			 * are required to confirm
5076 			 */
5077 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5078 		}
5079 
5080 		/* Placeholder - get hci_chan for logical link */
5081 		if (!hchan) {
5082 			/* Logical link not available */
5083 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5084 			break;
5085 		}
5086 
5087 		/* If the logical link is not yet connected, do not
5088 		 * send confirmation.
5089 		 */
5090 		if (hchan->state != BT_CONNECTED)
5091 			break;
5092 
5093 		/* Logical link is already ready to go */
5094 
5095 		chan->hs_hcon = hchan->conn;
5096 		chan->hs_hcon->l2cap_data = chan->conn;
5097 
5098 		if (result == L2CAP_MR_SUCCESS) {
5099 			/* Can confirm now */
5100 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5101 		} else {
5102 			/* Now only need move success
5103 			 * to confirm
5104 			 */
5105 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5106 		}
5107 
5108 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5109 		break;
5110 	default:
5111 		/* Any other amp move state means the move failed. */
5112 		chan->move_id = chan->local_amp_id;
5113 		l2cap_move_done(chan);
5114 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5115 	}
5116 
5117 	l2cap_chan_unlock(chan);
5118 }
5119 
5120 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5121 			    u16 result)
5122 {
5123 	struct l2cap_chan *chan;
5124 
5125 	chan = l2cap_get_chan_by_ident(conn, ident);
5126 	if (!chan) {
5127 		/* Could not locate channel, icid is best guess */
5128 		l2cap_send_move_chan_cfm_icid(conn, icid);
5129 		return;
5130 	}
5131 
5132 	__clear_chan_timer(chan);
5133 
5134 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5135 		if (result == L2CAP_MR_COLLISION) {
5136 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5137 		} else {
5138 			/* Cleanup - cancel move */
5139 			chan->move_id = chan->local_amp_id;
5140 			l2cap_move_done(chan);
5141 		}
5142 	}
5143 
5144 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5145 
5146 	l2cap_chan_unlock(chan);
5147 }
5148 
5149 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5150 				  struct l2cap_cmd_hdr *cmd,
5151 				  u16 cmd_len, void *data)
5152 {
5153 	struct l2cap_move_chan_rsp *rsp = data;
5154 	u16 icid, result;
5155 
5156 	if (cmd_len != sizeof(*rsp))
5157 		return -EPROTO;
5158 
5159 	icid = le16_to_cpu(rsp->icid);
5160 	result = le16_to_cpu(rsp->result);
5161 
5162 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5163 
5164 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5165 		l2cap_move_continue(conn, icid, result);
5166 	else
5167 		l2cap_move_fail(conn, cmd->ident, icid, result);
5168 
5169 	return 0;
5170 }
5171 
5172 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5173 				      struct l2cap_cmd_hdr *cmd,
5174 				      u16 cmd_len, void *data)
5175 {
5176 	struct l2cap_move_chan_cfm *cfm = data;
5177 	struct l2cap_chan *chan;
5178 	u16 icid, result;
5179 
5180 	if (cmd_len != sizeof(*cfm))
5181 		return -EPROTO;
5182 
5183 	icid = le16_to_cpu(cfm->icid);
5184 	result = le16_to_cpu(cfm->result);
5185 
5186 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5187 
5188 	chan = l2cap_get_chan_by_dcid(conn, icid);
5189 	if (!chan) {
5190 		/* Spec requires a response even if the icid was not found */
5191 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5192 		return 0;
5193 	}
5194 
5195 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5196 		if (result == L2CAP_MC_CONFIRMED) {
5197 			chan->local_amp_id = chan->move_id;
5198 			if (chan->local_amp_id == AMP_ID_BREDR)
5199 				__release_logical_link(chan);
5200 		} else {
5201 			chan->move_id = chan->local_amp_id;
5202 		}
5203 
5204 		l2cap_move_done(chan);
5205 	}
5206 
5207 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5208 
5209 	l2cap_chan_unlock(chan);
5210 
5211 	return 0;
5212 }
5213 
5214 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5215 						 struct l2cap_cmd_hdr *cmd,
5216 						 u16 cmd_len, void *data)
5217 {
5218 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5219 	struct l2cap_chan *chan;
5220 	u16 icid;
5221 
5222 	if (cmd_len != sizeof(*rsp))
5223 		return -EPROTO;
5224 
5225 	icid = le16_to_cpu(rsp->icid);
5226 
5227 	BT_DBG("icid 0x%4.4x", icid);
5228 
5229 	chan = l2cap_get_chan_by_scid(conn, icid);
5230 	if (!chan)
5231 		return 0;
5232 
5233 	__clear_chan_timer(chan);
5234 
5235 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5236 		chan->local_amp_id = chan->move_id;
5237 
5238 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5239 			__release_logical_link(chan);
5240 
5241 		l2cap_move_done(chan);
5242 	}
5243 
5244 	l2cap_chan_unlock(chan);
5245 
5246 	return 0;
5247 }
5248 
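/* Handle an LE Connection Parameter Update Request (only valid when we are
 * master of the connection).  The parameters are validated, an accept or
 * reject response is sent, and accepted values are passed on to the
 * controller and to mgmt.
 */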
5249 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5250 					      struct l2cap_cmd_hdr *cmd,
5251 					      u16 cmd_len, u8 *data)
5252 {
5253 	struct hci_conn *hcon = conn->hcon;
5254 	struct l2cap_conn_param_update_req *req;
5255 	struct l2cap_conn_param_update_rsp rsp;
5256 	u16 min, max, latency, to_multiplier;
5257 	int err;
5258 
5259 	if (hcon->role != HCI_ROLE_MASTER)
5260 		return -EINVAL;
5261 
5262 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5263 		return -EPROTO;
5264 
5265 	req = (struct l2cap_conn_param_update_req *) data;
5266 	min		= __le16_to_cpu(req->min);
5267 	max		= __le16_to_cpu(req->max);
5268 	latency		= __le16_to_cpu(req->latency);
5269 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5270 
5271 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5272 	       min, max, latency, to_multiplier);
5273 
5274 	memset(&rsp, 0, sizeof(rsp));
5275 
5276 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5277 	if (err)
5278 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5279 	else
5280 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5281 
5282 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5283 		       sizeof(rsp), &rsp);
5284 
5285 	if (!err) {
5286 		u8 store_hint;
5287 
5288 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5289 						to_multiplier);
5290 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5291 				    store_hint, min, max, latency,
5292 				    to_multiplier);
5293 
5294 	}
5295 
5296 	return 0;
5297 }
5298 
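/* Handle an LE Credit Based Connection Response.  On success the remote
 * CID, MTU, MPS and initial credits are stored and the channel becomes
 * ready; an authentication or encryption failure raises the security level
 * and re-initiates security (unless MITM protection is already in place),
 * and any other result deletes the channel.
 */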
5299 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5300 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5301 				u8 *data)
5302 {
5303 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5304 	struct hci_conn *hcon = conn->hcon;
5305 	u16 dcid, mtu, mps, credits, result;
5306 	struct l2cap_chan *chan;
5307 	int err, sec_level;
5308 
5309 	if (cmd_len < sizeof(*rsp))
5310 		return -EPROTO;
5311 
5312 	dcid    = __le16_to_cpu(rsp->dcid);
5313 	mtu     = __le16_to_cpu(rsp->mtu);
5314 	mps     = __le16_to_cpu(rsp->mps);
5315 	credits = __le16_to_cpu(rsp->credits);
5316 	result  = __le16_to_cpu(rsp->result);
5317 
5318 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5319 					   dcid < L2CAP_CID_DYN_START ||
5320 					   dcid > L2CAP_CID_LE_DYN_END))
5321 		return -EPROTO;
5322 
5323 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5324 	       dcid, mtu, mps, credits, result);
5325 
5326 	mutex_lock(&conn->chan_lock);
5327 
5328 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5329 	if (!chan) {
5330 		err = -EBADSLT;
5331 		goto unlock;
5332 	}
5333 
5334 	err = 0;
5335 
5336 	l2cap_chan_lock(chan);
5337 
5338 	switch (result) {
5339 	case L2CAP_CR_SUCCESS:
5340 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5341 			err = -EBADSLT;
5342 			break;
5343 		}
5344 
5345 		chan->ident = 0;
5346 		chan->dcid = dcid;
5347 		chan->omtu = mtu;
5348 		chan->remote_mps = mps;
5349 		chan->tx_credits = credits;
5350 		l2cap_chan_ready(chan);
5351 		break;
5352 
5353 	case L2CAP_CR_AUTHENTICATION:
5354 	case L2CAP_CR_ENCRYPTION:
5355 		/* If we already have MITM protection we can't do
5356 		 * anything.
5357 		 */
5358 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5359 			l2cap_chan_del(chan, ECONNREFUSED);
5360 			break;
5361 		}
5362 
5363 		sec_level = hcon->sec_level + 1;
5364 		if (chan->sec_level < sec_level)
5365 			chan->sec_level = sec_level;
5366 
5367 		/* We'll need to send a new Connect Request */
5368 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5369 
5370 		smp_conn_security(hcon, chan->sec_level);
5371 		break;
5372 
5373 	default:
5374 		l2cap_chan_del(chan, ECONNREFUSED);
5375 		break;
5376 	}
5377 
5378 	l2cap_chan_unlock(chan);
5379 
5380 unlock:
5381 	mutex_unlock(&conn->chan_lock);
5382 
5383 	return err;
5384 }
5385 
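/* Dispatch a single BR/EDR signaling command to its handler.  Unknown
 * command codes are logged and -EINVAL is returned.
 */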
5386 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5387 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5388 				      u8 *data)
5389 {
5390 	int err = 0;
5391 
5392 	switch (cmd->code) {
5393 	case L2CAP_COMMAND_REJ:
5394 		l2cap_command_rej(conn, cmd, cmd_len, data);
5395 		break;
5396 
5397 	case L2CAP_CONN_REQ:
5398 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5399 		break;
5400 
5401 	case L2CAP_CONN_RSP:
5402 	case L2CAP_CREATE_CHAN_RSP:
5403 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5404 		break;
5405 
5406 	case L2CAP_CONF_REQ:
5407 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5408 		break;
5409 
5410 	case L2CAP_CONF_RSP:
5411 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5412 		break;
5413 
5414 	case L2CAP_DISCONN_REQ:
5415 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5416 		break;
5417 
5418 	case L2CAP_DISCONN_RSP:
5419 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5420 		break;
5421 
5422 	case L2CAP_ECHO_REQ:
5423 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5424 		break;
5425 
5426 	case L2CAP_ECHO_RSP:
5427 		break;
5428 
5429 	case L2CAP_INFO_REQ:
5430 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5431 		break;
5432 
5433 	case L2CAP_INFO_RSP:
5434 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5435 		break;
5436 
5437 	case L2CAP_CREATE_CHAN_REQ:
5438 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5439 		break;
5440 
5441 	case L2CAP_MOVE_CHAN_REQ:
5442 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5443 		break;
5444 
5445 	case L2CAP_MOVE_CHAN_RSP:
5446 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5447 		break;
5448 
5449 	case L2CAP_MOVE_CHAN_CFM:
5450 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5451 		break;
5452 
5453 	case L2CAP_MOVE_CHAN_CFM_RSP:
5454 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5455 		break;
5456 
5457 	default:
5458 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5459 		err = -EINVAL;
5460 		break;
5461 	}
5462 
5463 	return err;
5464 }
5465 
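/* Handle an LE Credit Based Connection Request: find a listener for the
 * PSM, check security and the dynamic CID range, create the new channel and
 * respond.  With DEFER_SETUP the response is postponed until the owner
 * accepts the connection.
 */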
5466 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5467 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5468 				u8 *data)
5469 {
5470 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5471 	struct l2cap_le_conn_rsp rsp;
5472 	struct l2cap_chan *chan, *pchan;
5473 	u16 dcid, scid, credits, mtu, mps;
5474 	__le16 psm;
5475 	u8 result;
5476 
5477 	if (cmd_len != sizeof(*req))
5478 		return -EPROTO;
5479 
5480 	scid = __le16_to_cpu(req->scid);
5481 	mtu  = __le16_to_cpu(req->mtu);
5482 	mps  = __le16_to_cpu(req->mps);
5483 	psm  = req->psm;
5484 	dcid = 0;
5485 	credits = 0;
5486 
5487 	if (mtu < 23 || mps < 23)
5488 		return -EPROTO;
5489 
5490 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5491 	       scid, mtu, mps);
5492 
5493 	/* Check if we have socket listening on psm */
5494 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5495 					 &conn->hcon->dst, LE_LINK);
5496 	if (!pchan) {
5497 		result = L2CAP_CR_BAD_PSM;
5498 		chan = NULL;
5499 		goto response;
5500 	}
5501 
5502 	mutex_lock(&conn->chan_lock);
5503 	l2cap_chan_lock(pchan);
5504 
5505 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5506 				     SMP_ALLOW_STK)) {
5507 		result = L2CAP_CR_AUTHENTICATION;
5508 		chan = NULL;
5509 		goto response_unlock;
5510 	}
5511 
5512 	/* Check for valid dynamic CID range */
5513 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5514 		result = L2CAP_CR_INVALID_SCID;
5515 		chan = NULL;
5516 		goto response_unlock;
5517 	}
5518 
5519 	/* Check if we already have a channel with that dcid */
5520 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5521 		result = L2CAP_CR_SCID_IN_USE;
5522 		chan = NULL;
5523 		goto response_unlock;
5524 	}
5525 
5526 	chan = pchan->ops->new_connection(pchan);
5527 	if (!chan) {
5528 		result = L2CAP_CR_NO_MEM;
5529 		goto response_unlock;
5530 	}
5531 
5532 	l2cap_le_flowctl_init(chan);
5533 
5534 	bacpy(&chan->src, &conn->hcon->src);
5535 	bacpy(&chan->dst, &conn->hcon->dst);
5536 	chan->src_type = bdaddr_src_type(conn->hcon);
5537 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5538 	chan->psm  = psm;
5539 	chan->dcid = scid;
5540 	chan->omtu = mtu;
5541 	chan->remote_mps = mps;
5542 	chan->tx_credits = __le16_to_cpu(req->credits);
5543 
5544 	__l2cap_chan_add(conn, chan);
5545 	dcid = chan->scid;
5546 	credits = chan->rx_credits;
5547 
5548 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5549 
5550 	chan->ident = cmd->ident;
5551 
5552 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5553 		l2cap_state_change(chan, BT_CONNECT2);
5554 		/* The following result value is actually not defined
5555 		 * for LE CoC but we use it to let the function know
5556 		 * that it should bail out after doing its cleanup
5557 		 * instead of sending a response.
5558 		 */
5559 		result = L2CAP_CR_PEND;
5560 		chan->ops->defer(chan);
5561 	} else {
5562 		l2cap_chan_ready(chan);
5563 		result = L2CAP_CR_SUCCESS;
5564 	}
5565 
5566 response_unlock:
5567 	l2cap_chan_unlock(pchan);
5568 	mutex_unlock(&conn->chan_lock);
5569 	l2cap_chan_put(pchan);
5570 
5571 	if (result == L2CAP_CR_PEND)
5572 		return 0;
5573 
5574 response:
5575 	if (chan) {
5576 		rsp.mtu = cpu_to_le16(chan->imtu);
5577 		rsp.mps = cpu_to_le16(chan->mps);
5578 	} else {
5579 		rsp.mtu = 0;
5580 		rsp.mps = 0;
5581 	}
5582 
5583 	rsp.dcid    = cpu_to_le16(dcid);
5584 	rsp.credits = cpu_to_le16(credits);
5585 	rsp.result  = cpu_to_le16(result);
5586 
5587 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5588 
5589 	return 0;
5590 }
5591 
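/* Handle an LE Flow Control Credit packet: a 4-byte payload holding the
 * CID and the number of extra credits granted by the peer, each credit
 * allowing one more PDU to be sent.  A grant that would push the total
 * above LE_FLOWCTL_MAX_CREDITS disconnects the channel; otherwise
 * queued frames are transmitted while credits remain.
 */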
5592 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5593 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5594 				   u8 *data)
5595 {
5596 	struct l2cap_le_credits *pkt;
5597 	struct l2cap_chan *chan;
5598 	u16 cid, credits, max_credits;
5599 
5600 	if (cmd_len != sizeof(*pkt))
5601 		return -EPROTO;
5602 
5603 	pkt = (struct l2cap_le_credits *) data;
5604 	cid	= __le16_to_cpu(pkt->cid);
5605 	credits	= __le16_to_cpu(pkt->credits);
5606 
5607 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5608 
5609 	chan = l2cap_get_chan_by_dcid(conn, cid);
5610 	if (!chan)
5611 		return -EBADSLT;
5612 
5613 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5614 	if (credits > max_credits) {
5615 		BT_ERR("LE credits overflow");
5616 		l2cap_send_disconn_req(chan, ECONNRESET);
5617 		l2cap_chan_unlock(chan);
5618 
5619 		/* Return 0 so that we don't trigger an unnecessary
5620 		 * command reject packet.
5621 		 */
5622 		return 0;
5623 	}
5624 
5625 	chan->tx_credits += credits;
5626 
5627 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5628 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5629 		chan->tx_credits--;
5630 	}
5631 
5632 	if (chan->tx_credits)
5633 		chan->ops->resume(chan);
5634 
5635 	l2cap_chan_unlock(chan);
5636 
5637 	return 0;
5638 }
5639 
5640 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5641 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5642 				       u8 *data)
5643 {
5644 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5645 	struct l2cap_chan *chan;
5646 
5647 	if (cmd_len < sizeof(*rej))
5648 		return -EPROTO;
5649 
5650 	mutex_lock(&conn->chan_lock);
5651 
5652 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5653 	if (!chan)
5654 		goto done;
5655 
5656 	l2cap_chan_lock(chan);
5657 	l2cap_chan_del(chan, ECONNREFUSED);
5658 	l2cap_chan_unlock(chan);
5659 
5660 done:
5661 	mutex_unlock(&conn->chan_lock);
5662 	return 0;
5663 }
5664 
5665 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5666 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5667 				   u8 *data)
5668 {
5669 	int err = 0;
5670 
5671 	switch (cmd->code) {
5672 	case L2CAP_COMMAND_REJ:
5673 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5674 		break;
5675 
5676 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5677 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5678 		break;
5679 
5680 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5681 		break;
5682 
5683 	case L2CAP_LE_CONN_RSP:
5684 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5685 		break;
5686 
5687 	case L2CAP_LE_CONN_REQ:
5688 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5689 		break;
5690 
5691 	case L2CAP_LE_CREDITS:
5692 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5693 		break;
5694 
5695 	case L2CAP_DISCONN_REQ:
5696 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5697 		break;
5698 
5699 	case L2CAP_DISCONN_RSP:
5700 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5701 		break;
5702 
5703 	default:
5704 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5705 		err = -EINVAL;
5706 		break;
5707 	}
5708 
5709 	return err;
5710 }
5711 
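/* Parse the LE signaling channel.  Only a single command per frame is
 * handled here: the command header is validated and passed to
 * l2cap_le_sig_cmd(), and on failure a Command Reject (reason "command
 * not understood") is returned to the peer.
 */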
5712 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5713 					struct sk_buff *skb)
5714 {
5715 	struct hci_conn *hcon = conn->hcon;
5716 	struct l2cap_cmd_hdr *cmd;
5717 	u16 len;
5718 	int err;
5719 
5720 	if (hcon->type != LE_LINK)
5721 		goto drop;
5722 
5723 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5724 		goto drop;
5725 
5726 	cmd = (void *) skb->data;
5727 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5728 
5729 	len = le16_to_cpu(cmd->len);
5730 
5731 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5732 
5733 	if (len != skb->len || !cmd->ident) {
5734 		BT_DBG("corrupted command");
5735 		goto drop;
5736 	}
5737 
5738 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5739 	if (err) {
5740 		struct l2cap_cmd_rej_unk rej;
5741 
5742 		BT_ERR("Failed to handle LE signaling command (%d)", err);
5743 
5744 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5745 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5746 			       sizeof(rej), &rej);
5747 	}
5748 
5749 drop:
5750 	kfree_skb(skb);
5751 }
5752 
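/* Parse the BR/EDR signaling channel.  After passing the frame to
 * l2cap_raw_recv(), the loop walks the buffer one command header at a
 * time, since a single C-frame may carry several commands, and rejects
 * any command that could not be handled.
 */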
5753 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5754 				     struct sk_buff *skb)
5755 {
5756 	struct hci_conn *hcon = conn->hcon;
5757 	u8 *data = skb->data;
5758 	int len = skb->len;
5759 	struct l2cap_cmd_hdr cmd;
5760 	int err;
5761 
5762 	l2cap_raw_recv(conn, skb);
5763 
5764 	if (hcon->type != ACL_LINK)
5765 		goto drop;
5766 
5767 	while (len >= L2CAP_CMD_HDR_SIZE) {
5768 		u16 cmd_len;
5769 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5770 		data += L2CAP_CMD_HDR_SIZE;
5771 		len  -= L2CAP_CMD_HDR_SIZE;
5772 
5773 		cmd_len = le16_to_cpu(cmd.len);
5774 
5775 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5776 		       cmd.ident);
5777 
5778 		if (cmd_len > len || !cmd.ident) {
5779 			BT_DBG("corrupted command");
5780 			break;
5781 		}
5782 
5783 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5784 		if (err) {
5785 			struct l2cap_cmd_rej_unk rej;
5786 
5787 			BT_ERR("Failed to handle signaling command (%d)", err);
5788 
5789 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5790 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5791 				       sizeof(rej), &rej);
5792 		}
5793 
5794 		data += cmd_len;
5795 		len  -= cmd_len;
5796 	}
5797 
5798 drop:
5799 	kfree_skb(skb);
5800 }
5801 
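/* Verify the CRC-16 FCS of a received ERTM/streaming frame when FCS is
 * enabled on the channel.  The FCS is trimmed from the tail of the skb
 * and compared against a checksum computed over the L2CAP header and
 * the remaining payload.
 */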
5802 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5803 {
5804 	u16 our_fcs, rcv_fcs;
5805 	int hdr_size;
5806 
5807 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5808 		hdr_size = L2CAP_EXT_HDR_SIZE;
5809 	else
5810 		hdr_size = L2CAP_ENH_HDR_SIZE;
5811 
5812 	if (chan->fcs == L2CAP_FCS_CRC16) {
5813 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5814 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5815 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5816 
5817 		if (our_fcs != rcv_fcs)
5818 			return -EBADMSG;
5819 	}
5820 	return 0;
5821 }
5822 
5823 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5824 {
5825 	struct l2cap_ctrl control;
5826 
5827 	BT_DBG("chan %p", chan);
5828 
5829 	memset(&control, 0, sizeof(control));
5830 	control.sframe = 1;
5831 	control.final = 1;
5832 	control.reqseq = chan->buffer_seq;
5833 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5834 
5835 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5836 		control.super = L2CAP_SUPER_RNR;
5837 		l2cap_send_sframe(chan, &control);
5838 	}
5839 
5840 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5841 	    chan->unacked_frames > 0)
5842 		__set_retrans_timer(chan);
5843 
5844 	/* Send pending iframes */
5845 	l2cap_ertm_send(chan);
5846 
5847 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5848 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5849 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5850 		 * send it now.
5851 		 */
5852 		control.super = L2CAP_SUPER_RR;
5853 		l2cap_send_sframe(chan, &control);
5854 	}
5855 }
5856 
5857 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5858 			    struct sk_buff **last_frag)
5859 {
5860 	/* skb->len reflects data in skb as well as all fragments
5861 	 * skb->data_len reflects only data in fragments
5862 	 */
5863 	if (!skb_has_frag_list(skb))
5864 		skb_shinfo(skb)->frag_list = new_frag;
5865 
5866 	new_frag->next = NULL;
5867 
5868 	(*last_frag)->next = new_frag;
5869 	*last_frag = new_frag;
5870 
5871 	skb->len += new_frag->len;
5872 	skb->data_len += new_frag->len;
5873 	skb->truesize += new_frag->truesize;
5874 }
5875 
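/* Reassemble an SDU from I-frames according to the SAR field.  An
 * unsegmented frame is delivered directly; a start fragment records the
 * expected SDU length, continuation and end fragments are chained onto
 * chan->sdu, and the completed SDU is handed to the channel's recv
 * callback.  Inconsistencies (duplicate start, missing start, oversized
 * SDU) return an error so the caller can tear the link down, and the
 * partial SDU is discarded.
 */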
5876 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5877 				struct l2cap_ctrl *control)
5878 {
5879 	int err = -EINVAL;
5880 
5881 	switch (control->sar) {
5882 	case L2CAP_SAR_UNSEGMENTED:
5883 		if (chan->sdu)
5884 			break;
5885 
5886 		err = chan->ops->recv(chan, skb);
5887 		break;
5888 
5889 	case L2CAP_SAR_START:
5890 		if (chan->sdu)
5891 			break;
5892 
5893 		chan->sdu_len = get_unaligned_le16(skb->data);
5894 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5895 
5896 		if (chan->sdu_len > chan->imtu) {
5897 			err = -EMSGSIZE;
5898 			break;
5899 		}
5900 
5901 		if (skb->len >= chan->sdu_len)
5902 			break;
5903 
5904 		chan->sdu = skb;
5905 		chan->sdu_last_frag = skb;
5906 
5907 		skb = NULL;
5908 		err = 0;
5909 		break;
5910 
5911 	case L2CAP_SAR_CONTINUE:
5912 		if (!chan->sdu)
5913 			break;
5914 
5915 		append_skb_frag(chan->sdu, skb,
5916 				&chan->sdu_last_frag);
5917 		skb = NULL;
5918 
5919 		if (chan->sdu->len >= chan->sdu_len)
5920 			break;
5921 
5922 		err = 0;
5923 		break;
5924 
5925 	case L2CAP_SAR_END:
5926 		if (!chan->sdu)
5927 			break;
5928 
5929 		append_skb_frag(chan->sdu, skb,
5930 				&chan->sdu_last_frag);
5931 		skb = NULL;
5932 
5933 		if (chan->sdu->len != chan->sdu_len)
5934 			break;
5935 
5936 		err = chan->ops->recv(chan, chan->sdu);
5937 
5938 		if (!err) {
5939 			/* Reassembly complete */
5940 			chan->sdu = NULL;
5941 			chan->sdu_last_frag = NULL;
5942 			chan->sdu_len = 0;
5943 		}
5944 		break;
5945 	}
5946 
5947 	if (err) {
5948 		kfree_skb(skb);
5949 		kfree_skb(chan->sdu);
5950 		chan->sdu = NULL;
5951 		chan->sdu_last_frag = NULL;
5952 		chan->sdu_len = 0;
5953 	}
5954 
5955 	return err;
5956 }
5957 
5958 static int l2cap_resegment(struct l2cap_chan *chan)
5959 {
5960 	/* Placeholder */
5961 	return 0;
5962 }
5963 
5964 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5965 {
5966 	u8 event;
5967 
5968 	if (chan->mode != L2CAP_MODE_ERTM)
5969 		return;
5970 
5971 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5972 	l2cap_tx(chan, NULL, NULL, event);
5973 }
5974 
5975 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5976 {
5977 	int err = 0;
5978 	/* Pass sequential frames to l2cap_reassemble_sdu()
5979 	 * until a gap is encountered.
5980 	 */
5981 
5982 	BT_DBG("chan %p", chan);
5983 
5984 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5985 		struct sk_buff *skb;
5986 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5987 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5988 
5989 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5990 
5991 		if (!skb)
5992 			break;
5993 
5994 		skb_unlink(skb, &chan->srej_q);
5995 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5996 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5997 		if (err)
5998 			break;
5999 	}
6000 
6001 	if (skb_queue_empty(&chan->srej_q)) {
6002 		chan->rx_state = L2CAP_RX_STATE_RECV;
6003 		l2cap_send_ack(chan);
6004 	}
6005 
6006 	return err;
6007 }
6008 
6009 static void l2cap_handle_srej(struct l2cap_chan *chan,
6010 			      struct l2cap_ctrl *control)
6011 {
6012 	struct sk_buff *skb;
6013 
6014 	BT_DBG("chan %p, control %p", chan, control);
6015 
6016 	if (control->reqseq == chan->next_tx_seq) {
6017 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6018 		l2cap_send_disconn_req(chan, ECONNRESET);
6019 		return;
6020 	}
6021 
6022 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6023 
6024 	if (skb == NULL) {
6025 		BT_DBG("Seq %d not available for retransmission",
6026 		       control->reqseq);
6027 		return;
6028 	}
6029 
6030 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6031 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6032 		l2cap_send_disconn_req(chan, ECONNRESET);
6033 		return;
6034 	}
6035 
6036 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6037 
6038 	if (control->poll) {
6039 		l2cap_pass_to_tx(chan, control);
6040 
6041 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6042 		l2cap_retransmit(chan, control);
6043 		l2cap_ertm_send(chan);
6044 
6045 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6046 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6047 			chan->srej_save_reqseq = control->reqseq;
6048 		}
6049 	} else {
6050 		l2cap_pass_to_tx_fbit(chan, control);
6051 
6052 		if (control->final) {
6053 			if (chan->srej_save_reqseq != control->reqseq ||
6054 			    !test_and_clear_bit(CONN_SREJ_ACT,
6055 						&chan->conn_state))
6056 				l2cap_retransmit(chan, control);
6057 		} else {
6058 			l2cap_retransmit(chan, control);
6059 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6060 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6061 				chan->srej_save_reqseq = control->reqseq;
6062 			}
6063 		}
6064 	}
6065 }
6066 
6067 static void l2cap_handle_rej(struct l2cap_chan *chan,
6068 			     struct l2cap_ctrl *control)
6069 {
6070 	struct sk_buff *skb;
6071 
6072 	BT_DBG("chan %p, control %p", chan, control);
6073 
6074 	if (control->reqseq == chan->next_tx_seq) {
6075 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6076 		l2cap_send_disconn_req(chan, ECONNRESET);
6077 		return;
6078 	}
6079 
6080 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6081 
6082 	if (chan->max_tx && skb &&
6083 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6084 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6085 		l2cap_send_disconn_req(chan, ECONNRESET);
6086 		return;
6087 	}
6088 
6089 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6090 
6091 	l2cap_pass_to_tx(chan, control);
6092 
6093 	if (control->final) {
6094 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6095 			l2cap_retransmit_all(chan, control);
6096 	} else {
6097 		l2cap_retransmit_all(chan, control);
6098 		l2cap_ertm_send(chan);
6099 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6100 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6101 	}
6102 }
6103 
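/* Classify the TxSeq of a received I-frame relative to the receive
 * window: expected, duplicate, out of order (needs an SREJ), previously
 * SREJ'd, or invalid.  Frames outside the window are silently ignored
 * when the tx window is at most half the sequence space (a "double
 * poll" can legitimately produce them) and treated as fatal otherwise.
 */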
6104 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6105 {
6106 	BT_DBG("chan %p, txseq %d", chan, txseq);
6107 
6108 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6109 	       chan->expected_tx_seq);
6110 
6111 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6112 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6113 		    chan->tx_win) {
6114 			/* See notes below regarding "double poll" and
6115 			 * invalid packets.
6116 			 */
6117 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6118 				BT_DBG("Invalid/Ignore - after SREJ");
6119 				return L2CAP_TXSEQ_INVALID_IGNORE;
6120 			} else {
6121 				BT_DBG("Invalid - in window after SREJ sent");
6122 				return L2CAP_TXSEQ_INVALID;
6123 			}
6124 		}
6125 
6126 		if (chan->srej_list.head == txseq) {
6127 			BT_DBG("Expected SREJ");
6128 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6129 		}
6130 
6131 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6132 			BT_DBG("Duplicate SREJ - txseq already stored");
6133 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6134 		}
6135 
6136 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6137 			BT_DBG("Unexpected SREJ - not requested");
6138 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6139 		}
6140 	}
6141 
6142 	if (chan->expected_tx_seq == txseq) {
6143 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6144 		    chan->tx_win) {
6145 			BT_DBG("Invalid - txseq outside tx window");
6146 			return L2CAP_TXSEQ_INVALID;
6147 		} else {
6148 			BT_DBG("Expected");
6149 			return L2CAP_TXSEQ_EXPECTED;
6150 		}
6151 	}
6152 
6153 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6154 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6155 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6156 		return L2CAP_TXSEQ_DUPLICATE;
6157 	}
6158 
6159 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6160 		/* A source of invalid packets is a "double poll" condition,
6161 		 * where delays cause us to send multiple poll packets.  If
6162 		 * the remote stack receives and processes both polls,
6163 		 * sequence numbers can wrap around in such a way that a
6164 		 * resent frame has a sequence number that looks like new data
6165 		 * with a sequence gap.  This would trigger an erroneous SREJ
6166 		 * request.
6167 		 *
6168 		 * Fortunately, this is impossible with a tx window that's
6169 		 * less than half of the maximum sequence number, which allows
6170 		 * invalid frames to be safely ignored.
6171 		 *
6172 		 * With tx window sizes greater than half of the tx window
6173 		 * maximum, the frame is invalid and cannot be ignored.  This
6174 		 * causes a disconnect.
6175 		 */
6176 
6177 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6178 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6179 			return L2CAP_TXSEQ_INVALID_IGNORE;
6180 		} else {
6181 			BT_DBG("Invalid - txseq outside tx window");
6182 			return L2CAP_TXSEQ_INVALID;
6183 		}
6184 	} else {
6185 		BT_DBG("Unexpected - txseq indicates missing frames");
6186 		return L2CAP_TXSEQ_UNEXPECTED;
6187 	}
6188 }
6189 
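/* ERTM receive state machine, RECV state.  In-sequence I-frames are
 * reassembled and acknowledged, a sequence gap queues the frame and
 * moves the channel to SREJ_SENT, and RR/RNR/REJ/SREJ S-frames drive
 * retransmission and the remote-busy bookkeeping.
 */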
6190 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6191 			       struct l2cap_ctrl *control,
6192 			       struct sk_buff *skb, u8 event)
6193 {
6194 	int err = 0;
6195 	bool skb_in_use = false;
6196 
6197 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6198 	       event);
6199 
6200 	switch (event) {
6201 	case L2CAP_EV_RECV_IFRAME:
6202 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6203 		case L2CAP_TXSEQ_EXPECTED:
6204 			l2cap_pass_to_tx(chan, control);
6205 
6206 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6207 				BT_DBG("Busy, discarding expected seq %d",
6208 				       control->txseq);
6209 				break;
6210 			}
6211 
6212 			chan->expected_tx_seq = __next_seq(chan,
6213 							   control->txseq);
6214 
6215 			chan->buffer_seq = chan->expected_tx_seq;
6216 			skb_in_use = true;
6217 
6218 			err = l2cap_reassemble_sdu(chan, skb, control);
6219 			if (err)
6220 				break;
6221 
6222 			if (control->final) {
6223 				if (!test_and_clear_bit(CONN_REJ_ACT,
6224 							&chan->conn_state)) {
6225 					control->final = 0;
6226 					l2cap_retransmit_all(chan, control);
6227 					l2cap_ertm_send(chan);
6228 				}
6229 			}
6230 
6231 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6232 				l2cap_send_ack(chan);
6233 			break;
6234 		case L2CAP_TXSEQ_UNEXPECTED:
6235 			l2cap_pass_to_tx(chan, control);
6236 
6237 			/* Can't issue SREJ frames in the local busy state.
6238 			 * Drop this frame, it will be seen as missing
6239 			 * when local busy is exited.
6240 			 */
6241 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6242 				BT_DBG("Busy, discarding unexpected seq %d",
6243 				       control->txseq);
6244 				break;
6245 			}
6246 
6247 			/* There was a gap in the sequence, so an SREJ
6248 			 * must be sent for each missing frame.  The
6249 			 * current frame is stored for later use.
6250 			 */
6251 			skb_queue_tail(&chan->srej_q, skb);
6252 			skb_in_use = true;
6253 			BT_DBG("Queued %p (queue len %d)", skb,
6254 			       skb_queue_len(&chan->srej_q));
6255 
6256 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6257 			l2cap_seq_list_clear(&chan->srej_list);
6258 			l2cap_send_srej(chan, control->txseq);
6259 
6260 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6261 			break;
6262 		case L2CAP_TXSEQ_DUPLICATE:
6263 			l2cap_pass_to_tx(chan, control);
6264 			break;
6265 		case L2CAP_TXSEQ_INVALID_IGNORE:
6266 			break;
6267 		case L2CAP_TXSEQ_INVALID:
6268 		default:
6269 			l2cap_send_disconn_req(chan, ECONNRESET);
6270 			break;
6271 		}
6272 		break;
6273 	case L2CAP_EV_RECV_RR:
6274 		l2cap_pass_to_tx(chan, control);
6275 		if (control->final) {
6276 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6277 
6278 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6279 			    !__chan_is_moving(chan)) {
6280 				control->final = 0;
6281 				l2cap_retransmit_all(chan, control);
6282 			}
6283 
6284 			l2cap_ertm_send(chan);
6285 		} else if (control->poll) {
6286 			l2cap_send_i_or_rr_or_rnr(chan);
6287 		} else {
6288 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6289 					       &chan->conn_state) &&
6290 			    chan->unacked_frames)
6291 				__set_retrans_timer(chan);
6292 
6293 			l2cap_ertm_send(chan);
6294 		}
6295 		break;
6296 	case L2CAP_EV_RECV_RNR:
6297 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6298 		l2cap_pass_to_tx(chan, control);
6299 		if (control && control->poll) {
6300 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6301 			l2cap_send_rr_or_rnr(chan, 0);
6302 		}
6303 		__clear_retrans_timer(chan);
6304 		l2cap_seq_list_clear(&chan->retrans_list);
6305 		break;
6306 	case L2CAP_EV_RECV_REJ:
6307 		l2cap_handle_rej(chan, control);
6308 		break;
6309 	case L2CAP_EV_RECV_SREJ:
6310 		l2cap_handle_srej(chan, control);
6311 		break;
6312 	default:
6313 		break;
6314 	}
6315 
6316 	if (skb && !skb_in_use) {
6317 		BT_DBG("Freeing %p", skb);
6318 		kfree_skb(skb);
6319 	}
6320 
6321 	return err;
6322 }
6323 
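/* ERTM receive state machine, SREJ_SENT state.  Incoming I-frames are
 * buffered on srej_q until the selectively rejected frames arrive, at
 * which point the queued frames are replayed in order and the channel
 * drops back to the RECV state.
 */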
6324 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6325 				    struct l2cap_ctrl *control,
6326 				    struct sk_buff *skb, u8 event)
6327 {
6328 	int err = 0;
6329 	u16 txseq = control->txseq;
6330 	bool skb_in_use = false;
6331 
6332 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6333 	       event);
6334 
6335 	switch (event) {
6336 	case L2CAP_EV_RECV_IFRAME:
6337 		switch (l2cap_classify_txseq(chan, txseq)) {
6338 		case L2CAP_TXSEQ_EXPECTED:
6339 			/* Keep frame for reassembly later */
6340 			l2cap_pass_to_tx(chan, control);
6341 			skb_queue_tail(&chan->srej_q, skb);
6342 			skb_in_use = true;
6343 			BT_DBG("Queued %p (queue len %d)", skb,
6344 			       skb_queue_len(&chan->srej_q));
6345 
6346 			chan->expected_tx_seq = __next_seq(chan, txseq);
6347 			break;
6348 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6349 			l2cap_seq_list_pop(&chan->srej_list);
6350 
6351 			l2cap_pass_to_tx(chan, control);
6352 			skb_queue_tail(&chan->srej_q, skb);
6353 			skb_in_use = true;
6354 			BT_DBG("Queued %p (queue len %d)", skb,
6355 			       skb_queue_len(&chan->srej_q));
6356 
6357 			err = l2cap_rx_queued_iframes(chan);
6358 			if (err)
6359 				break;
6360 
6361 			break;
6362 		case L2CAP_TXSEQ_UNEXPECTED:
6363 			/* Got a frame that can't be reassembled yet.
6364 			 * Save it for later, and send SREJs to cover
6365 			 * the missing frames.
6366 			 */
6367 			skb_queue_tail(&chan->srej_q, skb);
6368 			skb_in_use = true;
6369 			BT_DBG("Queued %p (queue len %d)", skb,
6370 			       skb_queue_len(&chan->srej_q));
6371 
6372 			l2cap_pass_to_tx(chan, control);
6373 			l2cap_send_srej(chan, control->txseq);
6374 			break;
6375 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6376 			/* This frame was requested with an SREJ, but
6377 			 * some expected retransmitted frames are
6378 			 * missing.  Request retransmission of missing
6379 			 * SREJ'd frames.
6380 			 */
6381 			skb_queue_tail(&chan->srej_q, skb);
6382 			skb_in_use = true;
6383 			BT_DBG("Queued %p (queue len %d)", skb,
6384 			       skb_queue_len(&chan->srej_q));
6385 
6386 			l2cap_pass_to_tx(chan, control);
6387 			l2cap_send_srej_list(chan, control->txseq);
6388 			break;
6389 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6390 			/* We've already queued this frame.  Drop this copy. */
6391 			l2cap_pass_to_tx(chan, control);
6392 			break;
6393 		case L2CAP_TXSEQ_DUPLICATE:
6394 			/* Expecting a later sequence number, so this frame
6395 			 * was already received.  Ignore it completely.
6396 			 */
6397 			break;
6398 		case L2CAP_TXSEQ_INVALID_IGNORE:
6399 			break;
6400 		case L2CAP_TXSEQ_INVALID:
6401 		default:
6402 			l2cap_send_disconn_req(chan, ECONNRESET);
6403 			break;
6404 		}
6405 		break;
6406 	case L2CAP_EV_RECV_RR:
6407 		l2cap_pass_to_tx(chan, control);
6408 		if (control->final) {
6409 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6410 
6411 			if (!test_and_clear_bit(CONN_REJ_ACT,
6412 						&chan->conn_state)) {
6413 				control->final = 0;
6414 				l2cap_retransmit_all(chan, control);
6415 			}
6416 
6417 			l2cap_ertm_send(chan);
6418 		} else if (control->poll) {
6419 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6420 					       &chan->conn_state) &&
6421 			    chan->unacked_frames) {
6422 				__set_retrans_timer(chan);
6423 			}
6424 
6425 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6426 			l2cap_send_srej_tail(chan);
6427 		} else {
6428 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6429 					       &chan->conn_state) &&
6430 			    chan->unacked_frames)
6431 				__set_retrans_timer(chan);
6432 
6433 			l2cap_send_ack(chan);
6434 		}
6435 		break;
6436 	case L2CAP_EV_RECV_RNR:
6437 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6438 		l2cap_pass_to_tx(chan, control);
6439 		if (control->poll) {
6440 			l2cap_send_srej_tail(chan);
6441 		} else {
6442 			struct l2cap_ctrl rr_control;
6443 			memset(&rr_control, 0, sizeof(rr_control));
6444 			rr_control.sframe = 1;
6445 			rr_control.super = L2CAP_SUPER_RR;
6446 			rr_control.reqseq = chan->buffer_seq;
6447 			l2cap_send_sframe(chan, &rr_control);
6448 		}
6449 
6450 		break;
6451 	case L2CAP_EV_RECV_REJ:
6452 		l2cap_handle_rej(chan, control);
6453 		break;
6454 	case L2CAP_EV_RECV_SREJ:
6455 		l2cap_handle_srej(chan, control);
6456 		break;
6457 	}
6458 
6459 	if (skb && !skb_in_use) {
6460 		BT_DBG("Freeing %p", skb);
6461 		kfree_skb(skb);
6462 	}
6463 
6464 	return err;
6465 }
6466 
6467 static int l2cap_finish_move(struct l2cap_chan *chan)
6468 {
6469 	BT_DBG("chan %p", chan);
6470 
6471 	chan->rx_state = L2CAP_RX_STATE_RECV;
6472 
6473 	if (chan->hs_hcon)
6474 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6475 	else
6476 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6477 
6478 	return l2cap_resegment(chan);
6479 }
6480 
6481 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6482 				 struct l2cap_ctrl *control,
6483 				 struct sk_buff *skb, u8 event)
6484 {
6485 	int err;
6486 
6487 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6488 	       event);
6489 
6490 	if (!control->poll)
6491 		return -EPROTO;
6492 
6493 	l2cap_process_reqseq(chan, control->reqseq);
6494 
6495 	if (!skb_queue_empty(&chan->tx_q))
6496 		chan->tx_send_head = skb_peek(&chan->tx_q);
6497 	else
6498 		chan->tx_send_head = NULL;
6499 
6500 	/* Rewind next_tx_seq to the point expected
6501 	 * by the receiver.
6502 	 */
6503 	chan->next_tx_seq = control->reqseq;
6504 	chan->unacked_frames = 0;
6505 
6506 	err = l2cap_finish_move(chan);
6507 	if (err)
6508 		return err;
6509 
6510 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6511 	l2cap_send_i_or_rr_or_rnr(chan);
6512 
6513 	if (event == L2CAP_EV_RECV_IFRAME)
6514 		return -EPROTO;
6515 
6516 	return l2cap_rx_state_recv(chan, control, NULL, event);
6517 }
6518 
6519 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6520 				 struct l2cap_ctrl *control,
6521 				 struct sk_buff *skb, u8 event)
6522 {
6523 	int err;
6524 
6525 	if (!control->final)
6526 		return -EPROTO;
6527 
6528 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6529 
6530 	chan->rx_state = L2CAP_RX_STATE_RECV;
6531 	l2cap_process_reqseq(chan, control->reqseq);
6532 
6533 	if (!skb_queue_empty(&chan->tx_q))
6534 		chan->tx_send_head = skb_peek(&chan->tx_q);
6535 	else
6536 		chan->tx_send_head = NULL;
6537 
6538 	/* Rewind next_tx_seq to the point expected
6539 	 * by the receiver.
6540 	 */
6541 	chan->next_tx_seq = control->reqseq;
6542 	chan->unacked_frames = 0;
6543 
6544 	if (chan->hs_hcon)
6545 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6546 	else
6547 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6548 
6549 	err = l2cap_resegment(chan);
6550 
6551 	if (!err)
6552 		err = l2cap_rx_state_recv(chan, control, skb, event);
6553 
6554 	return err;
6555 }
6556 
6557 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6558 {
6559 	/* Make sure reqseq is for a packet that has been sent but not acked */
6560 	u16 unacked;
6561 
6562 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6563 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6564 }
6565 
6566 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6567 		    struct sk_buff *skb, u8 event)
6568 {
6569 	int err = 0;
6570 
6571 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6572 	       control, skb, event, chan->rx_state);
6573 
6574 	if (__valid_reqseq(chan, control->reqseq)) {
6575 		switch (chan->rx_state) {
6576 		case L2CAP_RX_STATE_RECV:
6577 			err = l2cap_rx_state_recv(chan, control, skb, event);
6578 			break;
6579 		case L2CAP_RX_STATE_SREJ_SENT:
6580 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6581 						       event);
6582 			break;
6583 		case L2CAP_RX_STATE_WAIT_P:
6584 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6585 			break;
6586 		case L2CAP_RX_STATE_WAIT_F:
6587 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6588 			break;
6589 		default:
6590 			/* shut it down */
6591 			break;
6592 		}
6593 	} else {
6594 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6595 		       control->reqseq, chan->next_tx_seq,
6596 		       chan->expected_ack_seq);
6597 		l2cap_send_disconn_req(chan, ECONNRESET);
6598 	}
6599 
6600 	return err;
6601 }
6602 
6603 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6604 			   struct sk_buff *skb)
6605 {
6606 	int err = 0;
6607 
6608 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6609 	       chan->rx_state);
6610 
6611 	if (l2cap_classify_txseq(chan, control->txseq) ==
6612 	    L2CAP_TXSEQ_EXPECTED) {
6613 		l2cap_pass_to_tx(chan, control);
6614 
6615 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6616 		       __next_seq(chan, chan->buffer_seq));
6617 
6618 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6619 
6620 		l2cap_reassemble_sdu(chan, skb, control);
6621 	} else {
6622 		if (chan->sdu) {
6623 			kfree_skb(chan->sdu);
6624 			chan->sdu = NULL;
6625 		}
6626 		chan->sdu_last_frag = NULL;
6627 		chan->sdu_len = 0;
6628 
6629 		if (skb) {
6630 			BT_DBG("Freeing %p", skb);
6631 			kfree_skb(skb);
6632 		}
6633 	}
6634 
6635 	chan->last_acked_seq = control->txseq;
6636 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6637 
6638 	return err;
6639 }
6640 
6641 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6642 {
6643 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6644 	u16 len;
6645 	u8 event;
6646 
6647 	__unpack_control(chan, skb);
6648 
6649 	len = skb->len;
6650 
6651 	/*
6652 	 * We can just drop the corrupted I-frame here.
6653 	 * Receiver will miss it and start proper recovery
6654 	 * procedures and ask for retransmission.
6655 	 */
6656 	if (l2cap_check_fcs(chan, skb))
6657 		goto drop;
6658 
6659 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6660 		len -= L2CAP_SDULEN_SIZE;
6661 
6662 	if (chan->fcs == L2CAP_FCS_CRC16)
6663 		len -= L2CAP_FCS_SIZE;
6664 
6665 	if (len > chan->mps) {
6666 		l2cap_send_disconn_req(chan, ECONNRESET);
6667 		goto drop;
6668 	}
6669 
6670 	if (!control->sframe) {
6671 		int err;
6672 
6673 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6674 		       control->sar, control->reqseq, control->final,
6675 		       control->txseq);
6676 
6677 		/* Validate F-bit - F=0 always valid, F=1 only
6678 		 * valid in TX WAIT_F
6679 		 */
6680 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6681 			goto drop;
6682 
6683 		if (chan->mode != L2CAP_MODE_STREAMING) {
6684 			event = L2CAP_EV_RECV_IFRAME;
6685 			err = l2cap_rx(chan, control, skb, event);
6686 		} else {
6687 			err = l2cap_stream_rx(chan, control, skb);
6688 		}
6689 
6690 		if (err)
6691 			l2cap_send_disconn_req(chan, ECONNRESET);
6692 	} else {
6693 		const u8 rx_func_to_event[4] = {
6694 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6695 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6696 		};
6697 
6698 		/* Only I-frames are expected in streaming mode */
6699 		if (chan->mode == L2CAP_MODE_STREAMING)
6700 			goto drop;
6701 
6702 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6703 		       control->reqseq, control->final, control->poll,
6704 		       control->super);
6705 
6706 		if (len != 0) {
6707 			BT_ERR("Trailing bytes: %d in sframe", len);
6708 			l2cap_send_disconn_req(chan, ECONNRESET);
6709 			goto drop;
6710 		}
6711 
6712 		/* Validate F and P bits */
6713 		if (control->final && (control->poll ||
6714 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6715 			goto drop;
6716 
6717 		event = rx_func_to_event[control->super];
6718 		if (l2cap_rx(chan, control, skb, event))
6719 			l2cap_send_disconn_req(chan, ECONNRESET);
6720 	}
6721 
6722 	return 0;
6723 
6724 drop:
6725 	kfree_skb(skb);
6726 	return 0;
6727 }
6728 
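/* Return credits to the sender of an LE flow-control channel.  Nothing
 * is sent while our rx_credits are still at or above half of
 * le_max_credits; below that threshold an LE Flow Control Credit packet
 * tops the peer's allowance back up to le_max_credits.
 */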
6729 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6730 {
6731 	struct l2cap_conn *conn = chan->conn;
6732 	struct l2cap_le_credits pkt;
6733 	u16 return_credits;
6734 
6735 	/* We return more credits to the sender only after the amount of
6736 	 * credits falls below half of the initial amount.
6737 	 */
6738 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6739 		return;
6740 
6741 	return_credits = le_max_credits - chan->rx_credits;
6742 
6743 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6744 
6745 	chan->rx_credits += return_credits;
6746 
6747 	pkt.cid     = cpu_to_le16(chan->scid);
6748 	pkt.credits = cpu_to_le16(return_credits);
6749 
6750 	chan->ident = l2cap_get_ident(conn);
6751 
6752 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6753 }
6754 
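/* Receive one PDU on an LE credit-based channel.  Each PDU consumes a
 * credit we previously granted; the first PDU of an SDU starts with a
 * 2-byte SDU length and later PDUs are appended until the full SDU can
 * be delivered.  Reassembly failures free the skb here and return 0 so
 * the caller does not free it again; only the early credit and MTU
 * checks return a negative value, telling the caller to drop the skb.
 */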
6755 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6756 {
6757 	int err;
6758 
6759 	if (!chan->rx_credits) {
6760 		BT_ERR("No credits to receive LE L2CAP data");
6761 		l2cap_send_disconn_req(chan, ECONNRESET);
6762 		return -ENOBUFS;
6763 	}
6764 
6765 	if (chan->imtu < skb->len) {
6766 		BT_ERR("Too big LE L2CAP PDU");
6767 		return -ENOBUFS;
6768 	}
6769 
6770 	chan->rx_credits--;
6771 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6772 
6773 	l2cap_chan_le_send_credits(chan);
6774 
6775 	err = 0;
6776 
6777 	if (!chan->sdu) {
6778 		u16 sdu_len;
6779 
6780 		sdu_len = get_unaligned_le16(skb->data);
6781 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6782 
6783 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6784 		       sdu_len, skb->len, chan->imtu);
6785 
6786 		if (sdu_len > chan->imtu) {
6787 			BT_ERR("Too big LE L2CAP SDU length received");
6788 			err = -EMSGSIZE;
6789 			goto failed;
6790 		}
6791 
6792 		if (skb->len > sdu_len) {
6793 			BT_ERR("Too much LE L2CAP data received");
6794 			err = -EINVAL;
6795 			goto failed;
6796 		}
6797 
6798 		if (skb->len == sdu_len)
6799 			return chan->ops->recv(chan, skb);
6800 
6801 		chan->sdu = skb;
6802 		chan->sdu_len = sdu_len;
6803 		chan->sdu_last_frag = skb;
6804 
6805 		/* Detect if remote is not able to use the selected MPS */
6806 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6807 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6808 
6809 			/* Adjust the number of credits */
6810 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6811 			chan->mps = mps_len;
6812 			l2cap_chan_le_send_credits(chan);
6813 		}
6814 
6815 		return 0;
6816 	}
6817 
6818 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6819 	       chan->sdu->len, skb->len, chan->sdu_len);
6820 
6821 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6822 		BT_ERR("Too much LE L2CAP data received");
6823 		err = -EINVAL;
6824 		goto failed;
6825 	}
6826 
6827 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6828 	skb = NULL;
6829 
6830 	if (chan->sdu->len == chan->sdu_len) {
6831 		err = chan->ops->recv(chan, chan->sdu);
6832 		if (!err) {
6833 			chan->sdu = NULL;
6834 			chan->sdu_last_frag = NULL;
6835 			chan->sdu_len = 0;
6836 		}
6837 	}
6838 
6839 failed:
6840 	if (err) {
6841 		kfree_skb(skb);
6842 		kfree_skb(chan->sdu);
6843 		chan->sdu = NULL;
6844 		chan->sdu_last_frag = NULL;
6845 		chan->sdu_len = 0;
6846 	}
6847 
6848 	/* We can't return an error here since we took care of the skb
6849 	 * freeing internally. An error return would cause the caller to
6850 	 * do a double-free of the skb.
6851 	 */
6852 	return 0;
6853 }
6854 
6855 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6856 			       struct sk_buff *skb)
6857 {
6858 	struct l2cap_chan *chan;
6859 
6860 	chan = l2cap_get_chan_by_scid(conn, cid);
6861 	if (!chan) {
6862 		if (cid == L2CAP_CID_A2MP) {
6863 			chan = a2mp_channel_create(conn, skb);
6864 			if (!chan) {
6865 				kfree_skb(skb);
6866 				return;
6867 			}
6868 
6869 			l2cap_chan_lock(chan);
6870 		} else {
6871 			BT_DBG("unknown cid 0x%4.4x", cid);
6872 			/* Drop packet and return */
6873 			kfree_skb(skb);
6874 			return;
6875 		}
6876 	}
6877 
6878 	BT_DBG("chan %p, len %d", chan, skb->len);
6879 
6880 	/* If we receive data on a fixed channel before the info req/rsp
6881 	 * procedure is done, simply assume that the channel is supported
6882 	 * and mark it as ready.
6883 	 */
6884 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6885 		l2cap_chan_ready(chan);
6886 
6887 	if (chan->state != BT_CONNECTED)
6888 		goto drop;
6889 
6890 	switch (chan->mode) {
6891 	case L2CAP_MODE_LE_FLOWCTL:
6892 		if (l2cap_le_data_rcv(chan, skb) < 0)
6893 			goto drop;
6894 
6895 		goto done;
6896 
6897 	case L2CAP_MODE_BASIC:
6898 		/* If the socket receive buffer overflows we drop data here,
6899 		 * which is *bad* because L2CAP has to be reliable.
6900 		 * But we don't have any other choice. Basic mode L2CAP
6901 		 * doesn't provide a flow control mechanism. */
6902 
6903 		if (chan->imtu < skb->len) {
6904 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6905 			goto drop;
6906 		}
6907 
6908 		if (!chan->ops->recv(chan, skb))
6909 			goto done;
6910 		break;
6911 
6912 	case L2CAP_MODE_ERTM:
6913 	case L2CAP_MODE_STREAMING:
6914 		l2cap_data_rcv(chan, skb);
6915 		goto done;
6916 
6917 	default:
6918 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6919 		break;
6920 	}
6921 
6922 drop:
6923 	kfree_skb(skb);
6924 
6925 done:
6926 	l2cap_chan_unlock(chan);
6927 }
6928 
6929 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6930 				  struct sk_buff *skb)
6931 {
6932 	struct hci_conn *hcon = conn->hcon;
6933 	struct l2cap_chan *chan;
6934 
6935 	if (hcon->type != ACL_LINK)
6936 		goto free_skb;
6937 
6938 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6939 					ACL_LINK);
6940 	if (!chan)
6941 		goto free_skb;
6942 
6943 	BT_DBG("chan %p, len %d", chan, skb->len);
6944 
6945 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6946 		goto drop;
6947 
6948 	if (chan->imtu < skb->len)
6949 		goto drop;
6950 
6951 	/* Store remote BD_ADDR and PSM for msg_name */
6952 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6953 	bt_cb(skb)->l2cap.psm = psm;
6954 
6955 	if (!chan->ops->recv(chan, skb)) {
6956 		l2cap_chan_put(chan);
6957 		return;
6958 	}
6959 
6960 drop:
6961 	l2cap_chan_put(chan);
6962 free_skb:
6963 	kfree_skb(skb);
6964 }
6965 
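/* Entry point for a complete L2CAP frame on an existing connection.
 * The basic header is stripped and the frame is demultiplexed by CID
 * into signaling, connectionless, LE signaling or data channel
 * handling.  Frames arriving before the HCI connection is fully up are
 * queued on pending_rx and replayed later by process_pending_rx().
 */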
6966 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6967 {
6968 	struct l2cap_hdr *lh = (void *) skb->data;
6969 	struct hci_conn *hcon = conn->hcon;
6970 	u16 cid, len;
6971 	__le16 psm;
6972 
6973 	if (hcon->state != BT_CONNECTED) {
6974 		BT_DBG("queueing pending rx skb");
6975 		skb_queue_tail(&conn->pending_rx, skb);
6976 		return;
6977 	}
6978 
6979 	skb_pull(skb, L2CAP_HDR_SIZE);
6980 	cid = __le16_to_cpu(lh->cid);
6981 	len = __le16_to_cpu(lh->len);
6982 
6983 	if (len != skb->len) {
6984 		kfree_skb(skb);
6985 		return;
6986 	}
6987 
6988 	/* Since we can't actively block incoming LE connections we must
6989 	 * at least ensure that we ignore incoming data from them.
6990 	 */
6991 	if (hcon->type == LE_LINK &&
6992 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6993 				   bdaddr_dst_type(hcon))) {
6994 		kfree_skb(skb);
6995 		return;
6996 	}
6997 
6998 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6999 
7000 	switch (cid) {
7001 	case L2CAP_CID_SIGNALING:
7002 		l2cap_sig_channel(conn, skb);
7003 		break;
7004 
7005 	case L2CAP_CID_CONN_LESS:
7006 		psm = get_unaligned((__le16 *) skb->data);
7007 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7008 		l2cap_conless_channel(conn, psm, skb);
7009 		break;
7010 
7011 	case L2CAP_CID_LE_SIGNALING:
7012 		l2cap_le_sig_channel(conn, skb);
7013 		break;
7014 
7015 	default:
7016 		l2cap_data_channel(conn, cid, skb);
7017 		break;
7018 	}
7019 }
7020 
7021 static void process_pending_rx(struct work_struct *work)
7022 {
7023 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7024 					       pending_rx_work);
7025 	struct sk_buff *skb;
7026 
7027 	BT_DBG("");
7028 
7029 	while ((skb = skb_dequeue(&conn->pending_rx)))
7030 		l2cap_recv_frame(conn, skb);
7031 }
7032 
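/* Create (or return the existing) l2cap_conn object for an HCI
 * connection: allocate the HCI channel, pick the outgoing MTU from the
 * controller's LE or ACL buffer size, and initialize the fixed-channel
 * mask, locks, channel list and deferred work items.
 */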
7033 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7034 {
7035 	struct l2cap_conn *conn = hcon->l2cap_data;
7036 	struct hci_chan *hchan;
7037 
7038 	if (conn)
7039 		return conn;
7040 
7041 	hchan = hci_chan_create(hcon);
7042 	if (!hchan)
7043 		return NULL;
7044 
7045 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7046 	if (!conn) {
7047 		hci_chan_del(hchan);
7048 		return NULL;
7049 	}
7050 
7051 	kref_init(&conn->ref);
7052 	hcon->l2cap_data = conn;
7053 	conn->hcon = hci_conn_get(hcon);
7054 	conn->hchan = hchan;
7055 
7056 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7057 
7058 	switch (hcon->type) {
7059 	case LE_LINK:
7060 		if (hcon->hdev->le_mtu) {
7061 			conn->mtu = hcon->hdev->le_mtu;
7062 			break;
7063 		}
7064 		/* fall through */
7065 	default:
7066 		conn->mtu = hcon->hdev->acl_mtu;
7067 		break;
7068 	}
7069 
7070 	conn->feat_mask = 0;
7071 
7072 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7073 
7074 	if (hcon->type == ACL_LINK &&
7075 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7076 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7077 
7078 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7079 	    (bredr_sc_enabled(hcon->hdev) ||
7080 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7081 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7082 
7083 	mutex_init(&conn->ident_lock);
7084 	mutex_init(&conn->chan_lock);
7085 
7086 	INIT_LIST_HEAD(&conn->chan_l);
7087 	INIT_LIST_HEAD(&conn->users);
7088 
7089 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7090 
7091 	skb_queue_head_init(&conn->pending_rx);
7092 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7093 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7094 
7095 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7096 
7097 	return conn;
7098 }
7099 
7100 static bool is_valid_psm(u16 psm, u8 dst_type) {
7101 	if (!psm)
7102 		return false;
7103 
7104 	if (bdaddr_type_is_le(dst_type))
7105 		return (psm <= 0x00ff);
7106 
7107 	/* PSM must be odd and lsb of upper byte must be 0 */
7108 	return ((psm & 0x0101) == 0x0001);
7109 }
7110 
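/* Initiate an outgoing connection on a channel: validate the PSM/CID
 * combination against the channel type and mode, create or reuse the
 * ACL or LE link, attach the channel to the resulting l2cap_conn and
 * either start the connection procedure right away (if the link is
 * already up) or wait for the connect complete event.
 *
 * Rough usage sketch (hypothetical psm and dst supplied by the caller):
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(psm), 0, &dst,
 *				 BDADDR_LE_PUBLIC);
 */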
7111 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7112 		       bdaddr_t *dst, u8 dst_type)
7113 {
7114 	struct l2cap_conn *conn;
7115 	struct hci_conn *hcon;
7116 	struct hci_dev *hdev;
7117 	int err;
7118 
7119 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7120 	       dst_type, __le16_to_cpu(psm));
7121 
7122 	hdev = hci_get_route(dst, &chan->src);
7123 	if (!hdev)
7124 		return -EHOSTUNREACH;
7125 
7126 	hci_dev_lock(hdev);
7127 
7128 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7129 	    chan->chan_type != L2CAP_CHAN_RAW) {
7130 		err = -EINVAL;
7131 		goto done;
7132 	}
7133 
7134 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7135 		err = -EINVAL;
7136 		goto done;
7137 	}
7138 
7139 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7140 		err = -EINVAL;
7141 		goto done;
7142 	}
7143 
7144 	switch (chan->mode) {
7145 	case L2CAP_MODE_BASIC:
7146 		break;
7147 	case L2CAP_MODE_LE_FLOWCTL:
7148 		l2cap_le_flowctl_init(chan);
7149 		break;
7150 	case L2CAP_MODE_ERTM:
7151 	case L2CAP_MODE_STREAMING:
7152 		if (!disable_ertm)
7153 			break;
7154 		/* fall through */
7155 	default:
7156 		err = -EOPNOTSUPP;
7157 		goto done;
7158 	}
7159 
7160 	switch (chan->state) {
7161 	case BT_CONNECT:
7162 	case BT_CONNECT2:
7163 	case BT_CONFIG:
7164 		/* Already connecting */
7165 		err = 0;
7166 		goto done;
7167 
7168 	case BT_CONNECTED:
7169 		/* Already connected */
7170 		err = -EISCONN;
7171 		goto done;
7172 
7173 	case BT_OPEN:
7174 	case BT_BOUND:
7175 		/* Can connect */
7176 		break;
7177 
7178 	default:
7179 		err = -EBADFD;
7180 		goto done;
7181 	}
7182 
7183 	/* Set destination address and psm */
7184 	bacpy(&chan->dst, dst);
7185 	chan->dst_type = dst_type;
7186 
7187 	chan->psm = psm;
7188 	chan->dcid = cid;
7189 
7190 	if (bdaddr_type_is_le(dst_type)) {
7191 		u8 role;
7192 
7193 		/* Convert from L2CAP channel address type to HCI address type
7194 		 */
7195 		if (dst_type == BDADDR_LE_PUBLIC)
7196 			dst_type = ADDR_LE_DEV_PUBLIC;
7197 		else
7198 			dst_type = ADDR_LE_DEV_RANDOM;
7199 
7200 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7201 			role = HCI_ROLE_SLAVE;
7202 		else
7203 			role = HCI_ROLE_MASTER;
7204 
7205 		hcon = hci_connect_le_scan(hdev, dst, dst_type,
7206 					   chan->sec_level,
7207 					   HCI_LE_CONN_TIMEOUT,
7208 					   role);
7209 	} else {
7210 		u8 auth_type = l2cap_get_auth_type(chan);
7211 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7212 	}
7213 
7214 	if (IS_ERR(hcon)) {
7215 		err = PTR_ERR(hcon);
7216 		goto done;
7217 	}
7218 
7219 	conn = l2cap_conn_add(hcon);
7220 	if (!conn) {
7221 		hci_conn_drop(hcon);
7222 		err = -ENOMEM;
7223 		goto done;
7224 	}
7225 
7226 	mutex_lock(&conn->chan_lock);
7227 	l2cap_chan_lock(chan);
7228 
7229 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7230 		hci_conn_drop(hcon);
7231 		err = -EBUSY;
7232 		goto chan_unlock;
7233 	}
7234 
7235 	/* Update source addr of the socket */
7236 	bacpy(&chan->src, &hcon->src);
7237 	chan->src_type = bdaddr_src_type(hcon);
7238 
7239 	__l2cap_chan_add(conn, chan);
7240 
7241 	/* l2cap_chan_add takes its own ref so we can drop this one */
7242 	hci_conn_drop(hcon);
7243 
7244 	l2cap_state_change(chan, BT_CONNECT);
7245 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7246 
7247 	/* Release chan->sport so that it can be reused by other
7248 	 * sockets (as it's only used for listening sockets).
7249 	 */
7250 	write_lock(&chan_list_lock);
7251 	chan->sport = 0;
7252 	write_unlock(&chan_list_lock);
7253 
7254 	if (hcon->state == BT_CONNECTED) {
7255 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7256 			__clear_chan_timer(chan);
7257 			if (l2cap_chan_check_security(chan, true))
7258 				l2cap_state_change(chan, BT_CONNECTED);
7259 		} else
7260 			l2cap_do_start(chan);
7261 	}
7262 
7263 	err = 0;
7264 
7265 chan_unlock:
7266 	l2cap_chan_unlock(chan);
7267 	mutex_unlock(&conn->chan_lock);
7268 done:
7269 	hci_dev_unlock(hdev);
7270 	hci_dev_put(hdev);
7271 	return err;
7272 }
7273 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7274 
7275 /* ---- L2CAP interface with lower layer (HCI) ---- */
7276 
7277 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7278 {
7279 	int exact = 0, lm1 = 0, lm2 = 0;
7280 	struct l2cap_chan *c;
7281 
7282 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7283 
7284 	/* Find listening sockets and check their link_mode */
7285 	read_lock(&chan_list_lock);
7286 	list_for_each_entry(c, &chan_list, global_l) {
7287 		if (c->state != BT_LISTEN)
7288 			continue;
7289 
7290 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7291 			lm1 |= HCI_LM_ACCEPT;
7292 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7293 				lm1 |= HCI_LM_MASTER;
7294 			exact++;
7295 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7296 			lm2 |= HCI_LM_ACCEPT;
7297 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7298 				lm2 |= HCI_LM_MASTER;
7299 		}
7300 	}
7301 	read_unlock(&chan_list_lock);
7302 
7303 	return exact ? lm1 : lm2;
7304 }
7305 
7306 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7307  * from an existing channel in the list or from the beginning of the
7308  * global list (by passing NULL as first parameter).
7309  */
7310 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7311 						  struct hci_conn *hcon)
7312 {
7313 	u8 src_type = bdaddr_src_type(hcon);
7314 
7315 	read_lock(&chan_list_lock);
7316 
7317 	if (c)
7318 		c = list_next_entry(c, global_l);
7319 	else
7320 		c = list_entry(chan_list.next, typeof(*c), global_l);
7321 
7322 	list_for_each_entry_from(c, &chan_list, global_l) {
7323 		if (c->chan_type != L2CAP_CHAN_FIXED)
7324 			continue;
7325 		if (c->state != BT_LISTEN)
7326 			continue;
7327 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7328 			continue;
7329 		if (src_type != c->src_type)
7330 			continue;
7331 
7332 		l2cap_chan_hold(c);
7333 		read_unlock(&chan_list_lock);
7334 		return c;
7335 	}
7336 
7337 	read_unlock(&chan_list_lock);
7338 
7339 	return NULL;
7340 }
7341 
7342 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7343 {
7344 	struct hci_dev *hdev = hcon->hdev;
7345 	struct l2cap_conn *conn;
7346 	struct l2cap_chan *pchan;
7347 	u8 dst_type;
7348 
7349 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7350 		return;
7351 
7352 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7353 
7354 	if (status) {
7355 		l2cap_conn_del(hcon, bt_to_errno(status));
7356 		return;
7357 	}
7358 
7359 	conn = l2cap_conn_add(hcon);
7360 	if (!conn)
7361 		return;
7362 
7363 	dst_type = bdaddr_dst_type(hcon);
7364 
7365 	/* If device is blocked, do not create channels for it */
7366 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7367 		return;
7368 
7369 	/* Find fixed channels and notify them of the new connection. We
7370 	 * use multiple individual lookups, continuing each time where
7371 	 * we left off, because the list lock would prevent calling the
7372 	 * potentially sleeping l2cap_chan_lock() function.
7373 	 */
7374 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7375 	while (pchan) {
7376 		struct l2cap_chan *chan, *next;
7377 
7378 		/* Client fixed channels should override server ones */
7379 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7380 			goto next;
7381 
7382 		l2cap_chan_lock(pchan);
7383 		chan = pchan->ops->new_connection(pchan);
7384 		if (chan) {
7385 			bacpy(&chan->src, &hcon->src);
7386 			bacpy(&chan->dst, &hcon->dst);
7387 			chan->src_type = bdaddr_src_type(hcon);
7388 			chan->dst_type = dst_type;
7389 
7390 			__l2cap_chan_add(conn, chan);
7391 		}
7392 
7393 		l2cap_chan_unlock(pchan);
7394 next:
7395 		next = l2cap_global_fixed_chan(pchan, hcon);
7396 		l2cap_chan_put(pchan);
7397 		pchan = next;
7398 	}
7399 
7400 	l2cap_conn_ready(conn);
7401 }
7402 
7403 int l2cap_disconn_ind(struct hci_conn *hcon)
7404 {
7405 	struct l2cap_conn *conn = hcon->l2cap_data;
7406 
7407 	BT_DBG("hcon %p", hcon);
7408 
7409 	if (!conn)
7410 		return HCI_ERROR_REMOTE_USER_TERM;
7411 	return conn->disc_reason;
7412 }
7413 
7414 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7415 {
7416 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7417 		return;
7418 
7419 	BT_DBG("hcon %p reason %d", hcon, reason);
7420 
7421 	l2cap_conn_del(hcon, bt_to_errno(reason));
7422 }
7423 
7424 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7425 {
7426 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7427 		return;
7428 
7429 	if (encrypt == 0x00) {
7430 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7431 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7432 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7433 			   chan->sec_level == BT_SECURITY_FIPS)
7434 			l2cap_chan_close(chan, ECONNREFUSED);
7435 	} else {
7436 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7437 			__clear_chan_timer(chan);
7438 	}
7439 }
7440 
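/* Called by the HCI core when an authentication or encryption change on
 * the link has completed. Every channel on the connection is re-evaluated:
 * established channels are resumed, channels waiting to connect are started
 * (or timed out), and pending incoming connect requests are answered
 * according to the security result.
 */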
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

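	/* Re-check every channel on this connection; the A2MP channel is
	 * skipped as it is not updated here.
	 */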
	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

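/* Entry point for incoming ACL data from the HCI core. Start fragments
 * open a new reassembly buffer and continuation fragments are appended to
 * it; once a complete L2CAP frame has been collected it is handed off to
 * l2cap_recv_frame().
 */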
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For an AMP controller, do not create an l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* A start fragment always begins with the Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

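	/* Continuation fragment: append it to the pending reassembly
	 * buffer and dispatch the frame once all bytes have arrived.
	 */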
	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}

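/* Callbacks registered with the HCI core so that connection, disconnection
 * and security events on ACL/LE links are delivered to L2CAP.
 */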
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};

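/* Dump one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level and mode) to the "l2cap" debugfs file.
 */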
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

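/* Register the L2CAP socket layer and the HCI callbacks, and create the
 * debugfs entries when the Bluetooth debugfs root is available.
 */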
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}

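/* Undo l2cap_init(): remove the debugfs entry, unregister the HCI callbacks
 * and tear down the L2CAP socket layer.
 */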
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");