1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 				       u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 			   void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 		     struct sk_buff_head *skbs, u8 event);
66 
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 {
69 	if (link_type == LE_LINK) {
70 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 			return BDADDR_LE_PUBLIC;
72 		else
73 			return BDADDR_LE_RANDOM;
74 	}
75 
76 	return BDADDR_BREDR;
77 }
78 
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 {
81 	return bdaddr_type(hcon->type, hcon->src_type);
82 }
83 
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 {
86 	return bdaddr_type(hcon->type, hcon->dst_type);
87 }
88 
89 /* ---- L2CAP channels ---- */
90 
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
92 						   u16 cid)
93 {
94 	struct l2cap_chan *c;
95 
96 	list_for_each_entry(c, &conn->chan_l, list) {
97 		if (c->dcid == cid)
98 			return c;
99 	}
100 	return NULL;
101 }
102 
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 						   u16 cid)
105 {
106 	struct l2cap_chan *c;
107 
108 	list_for_each_entry(c, &conn->chan_l, list) {
109 		if (c->scid == cid)
110 			return c;
111 	}
112 	return NULL;
113 }
114 
115 /* Find channel with given SCID.
116  * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	mutex_lock(&conn->chan_lock);
123 	c = __l2cap_get_chan_by_scid(conn, cid);
124 	if (c)
125 		l2cap_chan_lock(c);
126 	mutex_unlock(&conn->chan_lock);
127 
128 	return c;
129 }
130 
131 /* Find channel with given DCID.
132  * Returns locked channel.
133  */
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 						 u16 cid)
136 {
137 	struct l2cap_chan *c;
138 
139 	mutex_lock(&conn->chan_lock);
140 	c = __l2cap_get_chan_by_dcid(conn, cid);
141 	if (c)
142 		l2cap_chan_lock(c);
143 	mutex_unlock(&conn->chan_lock);
144 
145 	return c;
146 }
147 
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 						    u8 ident)
150 {
151 	struct l2cap_chan *c;
152 
153 	list_for_each_entry(c, &conn->chan_l, list) {
154 		if (c->ident == ident)
155 			return c;
156 	}
157 	return NULL;
158 }
159 
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 						  u8 ident)
162 {
163 	struct l2cap_chan *c;
164 
165 	mutex_lock(&conn->chan_lock);
166 	c = __l2cap_get_chan_by_ident(conn, ident);
167 	if (c)
168 		l2cap_chan_lock(c);
169 	mutex_unlock(&conn->chan_lock);
170 
171 	return c;
172 }
173 
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 {
176 	struct l2cap_chan *c;
177 
178 	list_for_each_entry(c, &chan_list, global_l) {
179 		if (c->sport == psm && !bacmp(&c->src, src))
180 			return c;
181 	}
182 	return NULL;
183 }
184 
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186 {
187 	int err;
188 
189 	write_lock(&chan_list_lock);
190 
191 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
192 		err = -EADDRINUSE;
193 		goto done;
194 	}
195 
196 	if (psm) {
197 		chan->psm = psm;
198 		chan->sport = psm;
199 		err = 0;
200 	} else {
201 		u16 p, start, end, incr;
202 
203 		if (chan->src_type == BDADDR_BREDR) {
204 			start = L2CAP_PSM_DYN_START;
205 			end = L2CAP_PSM_AUTO_END;
206 			incr = 2;
207 		} else {
208 			start = L2CAP_PSM_LE_DYN_START;
209 			end = L2CAP_PSM_LE_DYN_END;
210 			incr = 1;
211 		}
212 
213 		err = -EINVAL;
214 		for (p = start; p <= end; p += incr)
215 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 				chan->psm   = cpu_to_le16(p);
217 				chan->sport = cpu_to_le16(p);
218 				err = 0;
219 				break;
220 			}
221 	}
222 
223 done:
224 	write_unlock(&chan_list_lock);
225 	return err;
226 }
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
228 
229 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
230 {
231 	write_lock(&chan_list_lock);
232 
233 	/* Override the defaults (which are for conn-oriented) */
234 	chan->omtu = L2CAP_DEFAULT_MTU;
235 	chan->chan_type = L2CAP_CHAN_FIXED;
236 
237 	chan->scid = scid;
238 
239 	write_unlock(&chan_list_lock);
240 
241 	return 0;
242 }
243 
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
245 {
246 	u16 cid, dyn_end;
247 
248 	if (conn->hcon->type == LE_LINK)
249 		dyn_end = L2CAP_CID_LE_DYN_END;
250 	else
251 		dyn_end = L2CAP_CID_DYN_END;
252 
253 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 		if (!__l2cap_get_chan_by_scid(conn, cid))
255 			return cid;
256 	}
257 
258 	return 0;
259 }
260 
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
262 {
263 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 	       state_to_string(state));
265 
266 	chan->state = state;
267 	chan->ops->state_change(chan, state, 0);
268 }
269 
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
271 						int state, int err)
272 {
273 	chan->state = state;
274 	chan->ops->state_change(chan, chan->state, err);
275 }
276 
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
278 {
279 	chan->ops->state_change(chan, chan->state, err);
280 }
281 
282 static void __set_retrans_timer(struct l2cap_chan *chan)
283 {
284 	if (!delayed_work_pending(&chan->monitor_timer) &&
285 	    chan->retrans_timeout) {
286 		l2cap_set_timer(chan, &chan->retrans_timer,
287 				msecs_to_jiffies(chan->retrans_timeout));
288 	}
289 }
290 
291 static void __set_monitor_timer(struct l2cap_chan *chan)
292 {
293 	__clear_retrans_timer(chan);
294 	if (chan->monitor_timeout) {
295 		l2cap_set_timer(chan, &chan->monitor_timer,
296 				msecs_to_jiffies(chan->monitor_timeout));
297 	}
298 }
299 
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
301 					       u16 seq)
302 {
303 	struct sk_buff *skb;
304 
305 	skb_queue_walk(head, skb) {
306 		if (bt_cb(skb)->l2cap.txseq == seq)
307 			return skb;
308 	}
309 
310 	return NULL;
311 }
312 
313 /* ---- L2CAP sequence number lists ---- */
314 
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316  * SREJ requests that are received and for frames that are to be
317  * retransmitted. These seq_list functions implement a singly-linked
318  * list in an array, where membership in the list can also be checked
319  * in constant time. Items can also be added to the tail of the list
320  * and removed from the head in constant time, without further memory
321  * allocs or frees.
322  */
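/*
 * Editor's illustration (not part of l2cap_core.c): a minimal sketch of how
 * the seq_list helpers defined below are typically used, e.g. for tracking
 * sequence numbers that still need a retransmission. The window size of 8
 * and the variable name are arbitrary example values; only the
 * l2cap_seq_list_* calls come from this file.
 *
 *	struct l2cap_seq_list pending;
 *
 *	if (l2cap_seq_list_init(&pending, 8))		// alloc roundup_pow_of_two(8) = 8 slots
 *		return -ENOMEM;
 *
 *	l2cap_seq_list_append(&pending, 3);		// head = 3, tail = 3
 *	l2cap_seq_list_append(&pending, 5);		// list[3] = 5, tail = 5
 *
 *	if (l2cap_seq_list_contains(&pending, 3))	// O(1) array lookup
 *		l2cap_seq_list_pop(&pending);		// returns 3, head moves to 5
 *
 *	l2cap_seq_list_free(&pending);
 */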
323 
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
325 {
326 	size_t alloc_size, i;
327 
328 	/* Allocated size is a power of 2 to map sequence numbers
329 	 * (which may be up to 14 bits) into a smaller array that is
330 	 * sized for the negotiated ERTM transmit windows.
331 	 */
332 	alloc_size = roundup_pow_of_two(size);
333 
334 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
335 	if (!seq_list->list)
336 		return -ENOMEM;
337 
338 	seq_list->mask = alloc_size - 1;
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 	for (i = 0; i < alloc_size; i++)
342 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343 
344 	return 0;
345 }
346 
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
348 {
349 	kfree(seq_list->list);
350 }
351 
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
353 					   u16 seq)
354 {
355 	/* Constant-time check for list membership */
356 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
357 }
358 
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 {
361 	u16 seq = seq_list->head;
362 	u16 mask = seq_list->mask;
363 
364 	seq_list->head = seq_list->list[seq & mask];
365 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
366 
367 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
370 	}
371 
372 	return seq;
373 }
374 
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
376 {
377 	u16 i;
378 
379 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
380 		return;
381 
382 	for (i = 0; i <= seq_list->mask; i++)
383 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
384 
385 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
387 }
388 
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
390 {
391 	u16 mask = seq_list->mask;
392 
393 	/* All appends happen in constant time */
394 
395 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
396 		return;
397 
398 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 		seq_list->head = seq;
400 	else
401 		seq_list->list[seq_list->tail & mask] = seq;
402 
403 	seq_list->tail = seq;
404 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
405 }
406 
407 static void l2cap_chan_timeout(struct work_struct *work)
408 {
409 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
410 					       chan_timer.work);
411 	struct l2cap_conn *conn = chan->conn;
412 	int reason;
413 
414 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
415 
416 	mutex_lock(&conn->chan_lock);
417 	l2cap_chan_lock(chan);
418 
419 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
420 		reason = ECONNREFUSED;
421 	else if (chan->state == BT_CONNECT &&
422 		 chan->sec_level != BT_SECURITY_SDP)
423 		reason = ECONNREFUSED;
424 	else
425 		reason = ETIMEDOUT;
426 
427 	l2cap_chan_close(chan, reason);
428 
429 	l2cap_chan_unlock(chan);
430 
431 	chan->ops->close(chan);
432 	mutex_unlock(&conn->chan_lock);
433 
434 	l2cap_chan_put(chan);
435 }
436 
437 struct l2cap_chan *l2cap_chan_create(void)
438 {
439 	struct l2cap_chan *chan;
440 
441 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
442 	if (!chan)
443 		return NULL;
444 
445 	mutex_init(&chan->lock);
446 
447 	/* Set default lock nesting level */
448 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
449 
450 	write_lock(&chan_list_lock);
451 	list_add(&chan->global_l, &chan_list);
452 	write_unlock(&chan_list_lock);
453 
454 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
455 
456 	chan->state = BT_OPEN;
457 
458 	kref_init(&chan->kref);
459 
460 	/* This flag is cleared in l2cap_chan_ready() */
461 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
462 
463 	BT_DBG("chan %p", chan);
464 
465 	return chan;
466 }
467 EXPORT_SYMBOL_GPL(l2cap_chan_create);
468 
469 static void l2cap_chan_destroy(struct kref *kref)
470 {
471 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
472 
473 	BT_DBG("chan %p", chan);
474 
475 	write_lock(&chan_list_lock);
476 	list_del(&chan->global_l);
477 	write_unlock(&chan_list_lock);
478 
479 	kfree(chan);
480 }
481 
482 void l2cap_chan_hold(struct l2cap_chan *c)
483 {
484 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
485 
486 	kref_get(&c->kref);
487 }
488 
489 void l2cap_chan_put(struct l2cap_chan *c)
490 {
491 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
492 
493 	kref_put(&c->kref, l2cap_chan_destroy);
494 }
495 EXPORT_SYMBOL_GPL(l2cap_chan_put);
496 
497 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
498 {
499 	chan->fcs  = L2CAP_FCS_CRC16;
500 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
501 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
502 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
503 	chan->remote_max_tx = chan->max_tx;
504 	chan->remote_tx_win = chan->tx_win;
505 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
506 	chan->sec_level = BT_SECURITY_LOW;
507 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
508 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
509 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
510 	chan->conf_state = 0;
511 
512 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
513 }
514 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
515 
516 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
517 {
518 	chan->sdu = NULL;
519 	chan->sdu_last_frag = NULL;
520 	chan->sdu_len = 0;
521 	chan->tx_credits = 0;
522 	chan->rx_credits = le_max_credits;
523 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
524 
525 	skb_queue_head_init(&chan->tx_q);
526 }
527 
528 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
529 {
530 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
531 	       __le16_to_cpu(chan->psm), chan->dcid);
532 
533 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
534 
535 	chan->conn = conn;
536 
537 	switch (chan->chan_type) {
538 	case L2CAP_CHAN_CONN_ORIENTED:
539 		/* Alloc CID for connection-oriented socket */
540 		chan->scid = l2cap_alloc_cid(conn);
541 		if (conn->hcon->type == ACL_LINK)
542 			chan->omtu = L2CAP_DEFAULT_MTU;
543 		break;
544 
545 	case L2CAP_CHAN_CONN_LESS:
546 		/* Connectionless socket */
547 		chan->scid = L2CAP_CID_CONN_LESS;
548 		chan->dcid = L2CAP_CID_CONN_LESS;
549 		chan->omtu = L2CAP_DEFAULT_MTU;
550 		break;
551 
552 	case L2CAP_CHAN_FIXED:
553 		/* Caller will set CID and CID specific MTU values */
554 		break;
555 
556 	default:
557 		/* Raw socket can send/recv signalling messages only */
558 		chan->scid = L2CAP_CID_SIGNALING;
559 		chan->dcid = L2CAP_CID_SIGNALING;
560 		chan->omtu = L2CAP_DEFAULT_MTU;
561 	}
562 
563 	chan->local_id		= L2CAP_BESTEFFORT_ID;
564 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
565 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
566 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
567 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
568 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
569 
570 	l2cap_chan_hold(chan);
571 
572 	/* Only keep a reference for fixed channels if they requested it */
573 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
574 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
575 		hci_conn_hold(conn->hcon);
576 
577 	list_add(&chan->list, &conn->chan_l);
578 }
579 
580 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
581 {
582 	mutex_lock(&conn->chan_lock);
583 	__l2cap_chan_add(conn, chan);
584 	mutex_unlock(&conn->chan_lock);
585 }
586 
587 void l2cap_chan_del(struct l2cap_chan *chan, int err)
588 {
589 	struct l2cap_conn *conn = chan->conn;
590 
591 	__clear_chan_timer(chan);
592 
593 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
594 	       state_to_string(chan->state));
595 
596 	chan->ops->teardown(chan, err);
597 
598 	if (conn) {
599 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
600 		/* Delete from channel list */
601 		list_del(&chan->list);
602 
603 		l2cap_chan_put(chan);
604 
605 		chan->conn = NULL;
606 
607 		/* Reference was only held for non-fixed channels or
608 		 * fixed channels that explicitly requested it using the
609 		 * FLAG_HOLD_HCI_CONN flag.
610 		 */
611 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
613 			hci_conn_drop(conn->hcon);
614 
615 		if (mgr && mgr->bredr_chan == chan)
616 			mgr->bredr_chan = NULL;
617 	}
618 
619 	if (chan->hs_hchan) {
620 		struct hci_chan *hs_hchan = chan->hs_hchan;
621 
622 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
623 		amp_disconnect_logical_link(hs_hchan);
624 	}
625 
626 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
627 		return;
628 
629 	switch (chan->mode) {
630 	case L2CAP_MODE_BASIC:
631 		break;
632 
633 	case L2CAP_MODE_LE_FLOWCTL:
634 		skb_queue_purge(&chan->tx_q);
635 		break;
636 
637 	case L2CAP_MODE_ERTM:
638 		__clear_retrans_timer(chan);
639 		__clear_monitor_timer(chan);
640 		__clear_ack_timer(chan);
641 
642 		skb_queue_purge(&chan->srej_q);
643 
644 		l2cap_seq_list_free(&chan->srej_list);
645 		l2cap_seq_list_free(&chan->retrans_list);
646 
647 		/* fall through */
648 
649 	case L2CAP_MODE_STREAMING:
650 		skb_queue_purge(&chan->tx_q);
651 		break;
652 	}
653 
654 	return;
655 }
656 EXPORT_SYMBOL_GPL(l2cap_chan_del);
657 
658 static void l2cap_conn_update_id_addr(struct work_struct *work)
659 {
660 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
661 					       id_addr_update_work);
662 	struct hci_conn *hcon = conn->hcon;
663 	struct l2cap_chan *chan;
664 
665 	mutex_lock(&conn->chan_lock);
666 
667 	list_for_each_entry(chan, &conn->chan_l, list) {
668 		l2cap_chan_lock(chan);
669 		bacpy(&chan->dst, &hcon->dst);
670 		chan->dst_type = bdaddr_dst_type(hcon);
671 		l2cap_chan_unlock(chan);
672 	}
673 
674 	mutex_unlock(&conn->chan_lock);
675 }
676 
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
678 {
679 	struct l2cap_conn *conn = chan->conn;
680 	struct l2cap_le_conn_rsp rsp;
681 	u16 result;
682 
683 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 		result = L2CAP_CR_AUTHORIZATION;
685 	else
686 		result = L2CAP_CR_BAD_PSM;
687 
688 	l2cap_state_change(chan, BT_DISCONN);
689 
690 	rsp.dcid    = cpu_to_le16(chan->scid);
691 	rsp.mtu     = cpu_to_le16(chan->imtu);
692 	rsp.mps     = cpu_to_le16(chan->mps);
693 	rsp.credits = cpu_to_le16(chan->rx_credits);
694 	rsp.result  = cpu_to_le16(result);
695 
696 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
697 		       &rsp);
698 }
699 
700 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
701 {
702 	struct l2cap_conn *conn = chan->conn;
703 	struct l2cap_conn_rsp rsp;
704 	u16 result;
705 
706 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 		result = L2CAP_CR_SEC_BLOCK;
708 	else
709 		result = L2CAP_CR_BAD_PSM;
710 
711 	l2cap_state_change(chan, BT_DISCONN);
712 
713 	rsp.scid   = cpu_to_le16(chan->dcid);
714 	rsp.dcid   = cpu_to_le16(chan->scid);
715 	rsp.result = cpu_to_le16(result);
716 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
717 
718 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
719 }
720 
721 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
722 {
723 	struct l2cap_conn *conn = chan->conn;
724 
725 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
726 
727 	switch (chan->state) {
728 	case BT_LISTEN:
729 		chan->ops->teardown(chan, 0);
730 		break;
731 
732 	case BT_CONNECTED:
733 	case BT_CONFIG:
734 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
735 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
736 			l2cap_send_disconn_req(chan, reason);
737 		} else
738 			l2cap_chan_del(chan, reason);
739 		break;
740 
741 	case BT_CONNECT2:
742 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
743 			if (conn->hcon->type == ACL_LINK)
744 				l2cap_chan_connect_reject(chan);
745 			else if (conn->hcon->type == LE_LINK)
746 				l2cap_chan_le_connect_reject(chan);
747 		}
748 
749 		l2cap_chan_del(chan, reason);
750 		break;
751 
752 	case BT_CONNECT:
753 	case BT_DISCONN:
754 		l2cap_chan_del(chan, reason);
755 		break;
756 
757 	default:
758 		chan->ops->teardown(chan, 0);
759 		break;
760 	}
761 }
762 EXPORT_SYMBOL(l2cap_chan_close);
763 
764 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
765 {
766 	switch (chan->chan_type) {
767 	case L2CAP_CHAN_RAW:
768 		switch (chan->sec_level) {
769 		case BT_SECURITY_HIGH:
770 		case BT_SECURITY_FIPS:
771 			return HCI_AT_DEDICATED_BONDING_MITM;
772 		case BT_SECURITY_MEDIUM:
773 			return HCI_AT_DEDICATED_BONDING;
774 		default:
775 			return HCI_AT_NO_BONDING;
776 		}
777 		break;
778 	case L2CAP_CHAN_CONN_LESS:
779 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
780 			if (chan->sec_level == BT_SECURITY_LOW)
781 				chan->sec_level = BT_SECURITY_SDP;
782 		}
783 		if (chan->sec_level == BT_SECURITY_HIGH ||
784 		    chan->sec_level == BT_SECURITY_FIPS)
785 			return HCI_AT_NO_BONDING_MITM;
786 		else
787 			return HCI_AT_NO_BONDING;
788 		break;
789 	case L2CAP_CHAN_CONN_ORIENTED:
790 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
791 			if (chan->sec_level == BT_SECURITY_LOW)
792 				chan->sec_level = BT_SECURITY_SDP;
793 
794 			if (chan->sec_level == BT_SECURITY_HIGH ||
795 			    chan->sec_level == BT_SECURITY_FIPS)
796 				return HCI_AT_NO_BONDING_MITM;
797 			else
798 				return HCI_AT_NO_BONDING;
799 		}
800 		/* fall through */
801 	default:
802 		switch (chan->sec_level) {
803 		case BT_SECURITY_HIGH:
804 		case BT_SECURITY_FIPS:
805 			return HCI_AT_GENERAL_BONDING_MITM;
806 		case BT_SECURITY_MEDIUM:
807 			return HCI_AT_GENERAL_BONDING;
808 		default:
809 			return HCI_AT_NO_BONDING;
810 		}
811 		break;
812 	}
813 }
814 
815 /* Service level security */
816 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
817 {
818 	struct l2cap_conn *conn = chan->conn;
819 	__u8 auth_type;
820 
821 	if (conn->hcon->type == LE_LINK)
822 		return smp_conn_security(conn->hcon, chan->sec_level);
823 
824 	auth_type = l2cap_get_auth_type(chan);
825 
826 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
827 				 initiator);
828 }
829 
830 static u8 l2cap_get_ident(struct l2cap_conn *conn)
831 {
832 	u8 id;
833 
834 	/* Get next available identifier.
835 	 *    1 - 128 are used by kernel.
836 	 *  129 - 199 are reserved.
837 	 *  200 - 254 are used by utilities like l2ping, etc.
838 	 */
839 
840 	mutex_lock(&conn->ident_lock);
841 
842 	if (++conn->tx_ident > 128)
843 		conn->tx_ident = 1;
844 
845 	id = conn->tx_ident;
846 
847 	mutex_unlock(&conn->ident_lock);
848 
849 	return id;
850 }
851 
852 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
853 			   void *data)
854 {
855 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
856 	u8 flags;
857 
858 	BT_DBG("code 0x%2.2x", code);
859 
860 	if (!skb)
861 		return;
862 
863 	/* Use NO_FLUSH if supported or we have an LE link (which does
864 	 * not support auto-flushing packets) */
865 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
866 	    conn->hcon->type == LE_LINK)
867 		flags = ACL_START_NO_FLUSH;
868 	else
869 		flags = ACL_START;
870 
871 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
872 	skb->priority = HCI_PRIO_MAX;
873 
874 	hci_send_acl(conn->hchan, skb, flags);
875 }
876 
877 static bool __chan_is_moving(struct l2cap_chan *chan)
878 {
879 	return chan->move_state != L2CAP_MOVE_STABLE &&
880 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
881 }
882 
883 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
884 {
885 	struct hci_conn *hcon = chan->conn->hcon;
886 	u16 flags;
887 
888 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
889 	       skb->priority);
890 
891 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
892 		if (chan->hs_hchan)
893 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
894 		else
895 			kfree_skb(skb);
896 
897 		return;
898 	}
899 
900 	/* Use NO_FLUSH for LE links (where this is the only option) or
901 	 * if the BR/EDR link supports it and flushing has not been
902 	 * explicitly requested (through FLAG_FLUSHABLE).
903 	 */
904 	if (hcon->type == LE_LINK ||
905 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
906 	     lmp_no_flush_capable(hcon->hdev)))
907 		flags = ACL_START_NO_FLUSH;
908 	else
909 		flags = ACL_START;
910 
911 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
912 	hci_send_acl(chan->conn->hchan, skb, flags);
913 }
914 
915 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
916 {
917 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
919 
920 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
921 		/* S-Frame */
922 		control->sframe = 1;
923 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
925 
926 		control->sar = 0;
927 		control->txseq = 0;
928 	} else {
929 		/* I-Frame */
930 		control->sframe = 0;
931 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
933 
934 		control->poll = 0;
935 		control->super = 0;
936 	}
937 }
938 
939 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
940 {
941 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
943 
944 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
945 		/* S-Frame */
946 		control->sframe = 1;
947 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
949 
950 		control->sar = 0;
951 		control->txseq = 0;
952 	} else {
953 		/* I-Frame */
954 		control->sframe = 0;
955 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
957 
958 		control->poll = 0;
959 		control->super = 0;
960 	}
961 }
962 
963 static inline void __unpack_control(struct l2cap_chan *chan,
964 				    struct sk_buff *skb)
965 {
966 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
967 		__unpack_extended_control(get_unaligned_le32(skb->data),
968 					  &bt_cb(skb)->l2cap);
969 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
970 	} else {
971 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
972 					  &bt_cb(skb)->l2cap);
973 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
974 	}
975 }
976 
977 static u32 __pack_extended_control(struct l2cap_ctrl *control)
978 {
979 	u32 packed;
980 
981 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
983 
984 	if (control->sframe) {
985 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
988 	} else {
989 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
991 	}
992 
993 	return packed;
994 }
995 
996 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
997 {
998 	u16 packed;
999 
1000 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1002 
1003 	if (control->sframe) {
1004 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 		packed |= L2CAP_CTRL_FRAME_TYPE;
1007 	} else {
1008 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1010 	}
1011 
1012 	return packed;
1013 }
1014 
1015 static inline void __pack_control(struct l2cap_chan *chan,
1016 				  struct l2cap_ctrl *control,
1017 				  struct sk_buff *skb)
1018 {
1019 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1020 		put_unaligned_le32(__pack_extended_control(control),
1021 				   skb->data + L2CAP_HDR_SIZE);
1022 	} else {
1023 		put_unaligned_le16(__pack_enhanced_control(control),
1024 				   skb->data + L2CAP_HDR_SIZE);
1025 	}
1026 }
1027 
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1029 {
1030 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 		return L2CAP_EXT_HDR_SIZE;
1032 	else
1033 		return L2CAP_ENH_HDR_SIZE;
1034 }
1035 
1036 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1037 					       u32 control)
1038 {
1039 	struct sk_buff *skb;
1040 	struct l2cap_hdr *lh;
1041 	int hlen = __ertm_hdr_size(chan);
1042 
1043 	if (chan->fcs == L2CAP_FCS_CRC16)
1044 		hlen += L2CAP_FCS_SIZE;
1045 
1046 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1047 
1048 	if (!skb)
1049 		return ERR_PTR(-ENOMEM);
1050 
1051 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1052 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1053 	lh->cid = cpu_to_le16(chan->dcid);
1054 
1055 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1057 	else
1058 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1059 
1060 	if (chan->fcs == L2CAP_FCS_CRC16) {
1061 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1062 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1063 	}
1064 
1065 	skb->priority = HCI_PRIO_MAX;
1066 	return skb;
1067 }
1068 
1069 static void l2cap_send_sframe(struct l2cap_chan *chan,
1070 			      struct l2cap_ctrl *control)
1071 {
1072 	struct sk_buff *skb;
1073 	u32 control_field;
1074 
1075 	BT_DBG("chan %p, control %p", chan, control);
1076 
1077 	if (!control->sframe)
1078 		return;
1079 
1080 	if (__chan_is_moving(chan))
1081 		return;
1082 
1083 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1084 	    !control->poll)
1085 		control->final = 1;
1086 
1087 	if (control->super == L2CAP_SUPER_RR)
1088 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1089 	else if (control->super == L2CAP_SUPER_RNR)
1090 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1091 
1092 	if (control->super != L2CAP_SUPER_SREJ) {
1093 		chan->last_acked_seq = control->reqseq;
1094 		__clear_ack_timer(chan);
1095 	}
1096 
1097 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1098 	       control->final, control->poll, control->super);
1099 
1100 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1101 		control_field = __pack_extended_control(control);
1102 	else
1103 		control_field = __pack_enhanced_control(control);
1104 
1105 	skb = l2cap_create_sframe_pdu(chan, control_field);
1106 	if (!IS_ERR(skb))
1107 		l2cap_do_send(chan, skb);
1108 }
1109 
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1111 {
1112 	struct l2cap_ctrl control;
1113 
1114 	BT_DBG("chan %p, poll %d", chan, poll);
1115 
1116 	memset(&control, 0, sizeof(control));
1117 	control.sframe = 1;
1118 	control.poll = poll;
1119 
1120 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 		control.super = L2CAP_SUPER_RNR;
1122 	else
1123 		control.super = L2CAP_SUPER_RR;
1124 
1125 	control.reqseq = chan->buffer_seq;
1126 	l2cap_send_sframe(chan, &control);
1127 }
1128 
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1130 {
1131 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1132 		return true;
1133 
1134 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 }
1136 
1137 static bool __amp_capable(struct l2cap_chan *chan)
1138 {
1139 	struct l2cap_conn *conn = chan->conn;
1140 	struct hci_dev *hdev;
1141 	bool amp_available = false;
1142 
1143 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1144 		return false;
1145 
1146 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1147 		return false;
1148 
1149 	read_lock(&hci_dev_list_lock);
1150 	list_for_each_entry(hdev, &hci_dev_list, list) {
1151 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1152 		    test_bit(HCI_UP, &hdev->flags)) {
1153 			amp_available = true;
1154 			break;
1155 		}
1156 	}
1157 	read_unlock(&hci_dev_list_lock);
1158 
1159 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1160 		return amp_available;
1161 
1162 	return false;
1163 }
1164 
1165 static bool l2cap_check_efs(struct l2cap_chan *chan)
1166 {
1167 	/* Check EFS parameters */
1168 	return true;
1169 }
1170 
1171 void l2cap_send_conn_req(struct l2cap_chan *chan)
1172 {
1173 	struct l2cap_conn *conn = chan->conn;
1174 	struct l2cap_conn_req req;
1175 
1176 	req.scid = cpu_to_le16(chan->scid);
1177 	req.psm  = chan->psm;
1178 
1179 	chan->ident = l2cap_get_ident(conn);
1180 
1181 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1182 
1183 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1184 }
1185 
1186 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1187 {
1188 	struct l2cap_create_chan_req req;
1189 	req.scid = cpu_to_le16(chan->scid);
1190 	req.psm  = chan->psm;
1191 	req.amp_id = amp_id;
1192 
1193 	chan->ident = l2cap_get_ident(chan->conn);
1194 
1195 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1196 		       sizeof(req), &req);
1197 }
1198 
1199 static void l2cap_move_setup(struct l2cap_chan *chan)
1200 {
1201 	struct sk_buff *skb;
1202 
1203 	BT_DBG("chan %p", chan);
1204 
1205 	if (chan->mode != L2CAP_MODE_ERTM)
1206 		return;
1207 
1208 	__clear_retrans_timer(chan);
1209 	__clear_monitor_timer(chan);
1210 	__clear_ack_timer(chan);
1211 
1212 	chan->retry_count = 0;
1213 	skb_queue_walk(&chan->tx_q, skb) {
1214 		if (bt_cb(skb)->l2cap.retries)
1215 			bt_cb(skb)->l2cap.retries = 1;
1216 		else
1217 			break;
1218 	}
1219 
1220 	chan->expected_tx_seq = chan->buffer_seq;
1221 
1222 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 	l2cap_seq_list_clear(&chan->retrans_list);
1225 	l2cap_seq_list_clear(&chan->srej_list);
1226 	skb_queue_purge(&chan->srej_q);
1227 
1228 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1230 
1231 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1232 }
1233 
1234 static void l2cap_move_done(struct l2cap_chan *chan)
1235 {
1236 	u8 move_role = chan->move_role;
1237 	BT_DBG("chan %p", chan);
1238 
1239 	chan->move_state = L2CAP_MOVE_STABLE;
1240 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1241 
1242 	if (chan->mode != L2CAP_MODE_ERTM)
1243 		return;
1244 
1245 	switch (move_role) {
1246 	case L2CAP_MOVE_ROLE_INITIATOR:
1247 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1249 		break;
1250 	case L2CAP_MOVE_ROLE_RESPONDER:
1251 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1252 		break;
1253 	}
1254 }
1255 
1256 static void l2cap_chan_ready(struct l2cap_chan *chan)
1257 {
1258 	/* The channel may have already been flagged as connected in
1259 	 * case of receiving data before the L2CAP info req/rsp
1260 	 * procedure is complete.
1261 	 */
1262 	if (chan->state == BT_CONNECTED)
1263 		return;
1264 
1265 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 	chan->conf_state = 0;
1267 	__clear_chan_timer(chan);
1268 
1269 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 		chan->ops->suspend(chan);
1271 
1272 	chan->state = BT_CONNECTED;
1273 
1274 	chan->ops->ready(chan);
1275 }
1276 
1277 static void l2cap_le_connect(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_le_conn_req req;
1281 
1282 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1283 		return;
1284 
1285 	req.psm     = chan->psm;
1286 	req.scid    = cpu_to_le16(chan->scid);
1287 	req.mtu     = cpu_to_le16(chan->imtu);
1288 	req.mps     = cpu_to_le16(chan->mps);
1289 	req.credits = cpu_to_le16(chan->rx_credits);
1290 
1291 	chan->ident = l2cap_get_ident(conn);
1292 
1293 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1294 		       sizeof(req), &req);
1295 }
1296 
1297 static void l2cap_le_start(struct l2cap_chan *chan)
1298 {
1299 	struct l2cap_conn *conn = chan->conn;
1300 
1301 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1302 		return;
1303 
1304 	if (!chan->psm) {
1305 		l2cap_chan_ready(chan);
1306 		return;
1307 	}
1308 
1309 	if (chan->state == BT_CONNECT)
1310 		l2cap_le_connect(chan);
1311 }
1312 
1313 static void l2cap_start_connection(struct l2cap_chan *chan)
1314 {
1315 	if (__amp_capable(chan)) {
1316 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 		a2mp_discover_amp(chan);
1318 	} else if (chan->conn->hcon->type == LE_LINK) {
1319 		l2cap_le_start(chan);
1320 	} else {
1321 		l2cap_send_conn_req(chan);
1322 	}
1323 }
1324 
1325 static void l2cap_request_info(struct l2cap_conn *conn)
1326 {
1327 	struct l2cap_info_req req;
1328 
1329 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1330 		return;
1331 
1332 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1333 
1334 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 	conn->info_ident = l2cap_get_ident(conn);
1336 
1337 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1338 
1339 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1340 		       sizeof(req), &req);
1341 }
1342 
1343 static void l2cap_do_start(struct l2cap_chan *chan)
1344 {
1345 	struct l2cap_conn *conn = chan->conn;
1346 
1347 	if (conn->hcon->type == LE_LINK) {
1348 		l2cap_le_start(chan);
1349 		return;
1350 	}
1351 
1352 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1353 		l2cap_request_info(conn);
1354 		return;
1355 	}
1356 
1357 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1358 		return;
1359 
1360 	if (l2cap_chan_check_security(chan, true) &&
1361 	    __l2cap_no_conn_pending(chan))
1362 		l2cap_start_connection(chan);
1363 }
1364 
1365 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1366 {
1367 	u32 local_feat_mask = l2cap_feat_mask;
1368 	if (!disable_ertm)
1369 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1370 
1371 	switch (mode) {
1372 	case L2CAP_MODE_ERTM:
1373 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1374 	case L2CAP_MODE_STREAMING:
1375 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1376 	default:
1377 		return 0x00;
1378 	}
1379 }
1380 
1381 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1382 {
1383 	struct l2cap_conn *conn = chan->conn;
1384 	struct l2cap_disconn_req req;
1385 
1386 	if (!conn)
1387 		return;
1388 
1389 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1390 		__clear_retrans_timer(chan);
1391 		__clear_monitor_timer(chan);
1392 		__clear_ack_timer(chan);
1393 	}
1394 
1395 	if (chan->scid == L2CAP_CID_A2MP) {
1396 		l2cap_state_change(chan, BT_DISCONN);
1397 		return;
1398 	}
1399 
1400 	req.dcid = cpu_to_le16(chan->dcid);
1401 	req.scid = cpu_to_le16(chan->scid);
1402 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1403 		       sizeof(req), &req);
1404 
1405 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1406 }
1407 
1408 /* ---- L2CAP connections ---- */
1409 static void l2cap_conn_start(struct l2cap_conn *conn)
1410 {
1411 	struct l2cap_chan *chan, *tmp;
1412 
1413 	BT_DBG("conn %p", conn);
1414 
1415 	mutex_lock(&conn->chan_lock);
1416 
1417 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1418 		l2cap_chan_lock(chan);
1419 
1420 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1421 			l2cap_chan_ready(chan);
1422 			l2cap_chan_unlock(chan);
1423 			continue;
1424 		}
1425 
1426 		if (chan->state == BT_CONNECT) {
1427 			if (!l2cap_chan_check_security(chan, true) ||
1428 			    !__l2cap_no_conn_pending(chan)) {
1429 				l2cap_chan_unlock(chan);
1430 				continue;
1431 			}
1432 
1433 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1434 			    && test_bit(CONF_STATE2_DEVICE,
1435 					&chan->conf_state)) {
1436 				l2cap_chan_close(chan, ECONNRESET);
1437 				l2cap_chan_unlock(chan);
1438 				continue;
1439 			}
1440 
1441 			l2cap_start_connection(chan);
1442 
1443 		} else if (chan->state == BT_CONNECT2) {
1444 			struct l2cap_conn_rsp rsp;
1445 			char buf[128];
1446 			rsp.scid = cpu_to_le16(chan->dcid);
1447 			rsp.dcid = cpu_to_le16(chan->scid);
1448 
1449 			if (l2cap_chan_check_security(chan, false)) {
1450 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1451 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1452 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1453 					chan->ops->defer(chan);
1454 
1455 				} else {
1456 					l2cap_state_change(chan, BT_CONFIG);
1457 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1458 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1459 				}
1460 			} else {
1461 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1462 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1463 			}
1464 
1465 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1466 				       sizeof(rsp), &rsp);
1467 
1468 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1469 			    rsp.result != L2CAP_CR_SUCCESS) {
1470 				l2cap_chan_unlock(chan);
1471 				continue;
1472 			}
1473 
1474 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1475 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1476 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1477 			chan->num_conf_req++;
1478 		}
1479 
1480 		l2cap_chan_unlock(chan);
1481 	}
1482 
1483 	mutex_unlock(&conn->chan_lock);
1484 }
1485 
1486 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1487 {
1488 	struct hci_conn *hcon = conn->hcon;
1489 	struct hci_dev *hdev = hcon->hdev;
1490 
1491 	BT_DBG("%s conn %p", hdev->name, conn);
1492 
1493 	/* For outgoing pairing which doesn't necessarily have an
1494 	 * associated socket (e.g. mgmt_pair_device).
1495 	 */
1496 	if (hcon->out)
1497 		smp_conn_security(hcon, hcon->pending_sec_level);
1498 
1499 	/* For LE slave connections, make sure the connection interval
1500 	 * is in the range of the minimum and maximum interval that has
1501 	 * been configured for this connection. If not, then trigger
1502 	 * the connection update procedure.
1503 	 */
1504 	if (hcon->role == HCI_ROLE_SLAVE &&
1505 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1506 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1507 		struct l2cap_conn_param_update_req req;
1508 
1509 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1510 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1511 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1512 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1513 
1514 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1515 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1516 	}
1517 }
1518 
1519 static void l2cap_conn_ready(struct l2cap_conn *conn)
1520 {
1521 	struct l2cap_chan *chan;
1522 	struct hci_conn *hcon = conn->hcon;
1523 
1524 	BT_DBG("conn %p", conn);
1525 
1526 	if (hcon->type == ACL_LINK)
1527 		l2cap_request_info(conn);
1528 
1529 	mutex_lock(&conn->chan_lock);
1530 
1531 	list_for_each_entry(chan, &conn->chan_l, list) {
1532 
1533 		l2cap_chan_lock(chan);
1534 
1535 		if (chan->scid == L2CAP_CID_A2MP) {
1536 			l2cap_chan_unlock(chan);
1537 			continue;
1538 		}
1539 
1540 		if (hcon->type == LE_LINK) {
1541 			l2cap_le_start(chan);
1542 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1543 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1544 				l2cap_chan_ready(chan);
1545 		} else if (chan->state == BT_CONNECT) {
1546 			l2cap_do_start(chan);
1547 		}
1548 
1549 		l2cap_chan_unlock(chan);
1550 	}
1551 
1552 	mutex_unlock(&conn->chan_lock);
1553 
1554 	if (hcon->type == LE_LINK)
1555 		l2cap_le_conn_ready(conn);
1556 
1557 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1558 }
1559 
1560 /* Notify sockets that we cannot guarantee reliability anymore */
1561 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1562 {
1563 	struct l2cap_chan *chan;
1564 
1565 	BT_DBG("conn %p", conn);
1566 
1567 	mutex_lock(&conn->chan_lock);
1568 
1569 	list_for_each_entry(chan, &conn->chan_l, list) {
1570 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1571 			l2cap_chan_set_err(chan, err);
1572 	}
1573 
1574 	mutex_unlock(&conn->chan_lock);
1575 }
1576 
1577 static void l2cap_info_timeout(struct work_struct *work)
1578 {
1579 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1580 					       info_timer.work);
1581 
1582 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1583 	conn->info_ident = 0;
1584 
1585 	l2cap_conn_start(conn);
1586 }
1587 
1588 /*
1589  * l2cap_user
1590  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1591  * callback is called during registration. The ->remove callback is called
1592  * during unregistration.
1593  * An l2cap_user object is unregistered either explicitly or implicitly when the
1594  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1595  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1596  * External modules must own a reference to the l2cap_conn object if they intend
1597  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1598  * any time if they don't.
1599  */
1600 
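/*
 * Editor's illustration (not part of l2cap_core.c): a hedged sketch of how an
 * external module might use this interface. The my_user/my_probe/my_remove
 * names and callback bodies are hypothetical; only the l2cap_user fields and
 * the register/unregister calls are taken from the code below.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan stay valid until remove() runs
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// drop anything that still refers to conn
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),	// register checks this is an empty list head
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	// hold a conn reference while registered, as required above
 *	conn = l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */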
1601 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1602 {
1603 	struct hci_dev *hdev = conn->hcon->hdev;
1604 	int ret;
1605 
1606 	/* We need to check whether l2cap_conn is registered. If it is not, we
1607 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1608 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1609 	 * relies on the parent hci_conn object to be locked. This itself relies
1610 	 * on the hci_dev object to be locked. So we must lock the hci device
1611 	 * here, too. */
1612 
1613 	hci_dev_lock(hdev);
1614 
1615 	if (!list_empty(&user->list)) {
1616 		ret = -EINVAL;
1617 		goto out_unlock;
1618 	}
1619 
1620 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1621 	if (!conn->hchan) {
1622 		ret = -ENODEV;
1623 		goto out_unlock;
1624 	}
1625 
1626 	ret = user->probe(conn, user);
1627 	if (ret)
1628 		goto out_unlock;
1629 
1630 	list_add(&user->list, &conn->users);
1631 	ret = 0;
1632 
1633 out_unlock:
1634 	hci_dev_unlock(hdev);
1635 	return ret;
1636 }
1637 EXPORT_SYMBOL(l2cap_register_user);
1638 
1639 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1640 {
1641 	struct hci_dev *hdev = conn->hcon->hdev;
1642 
1643 	hci_dev_lock(hdev);
1644 
1645 	if (list_empty(&user->list))
1646 		goto out_unlock;
1647 
1648 	list_del_init(&user->list);
1649 	user->remove(conn, user);
1650 
1651 out_unlock:
1652 	hci_dev_unlock(hdev);
1653 }
1654 EXPORT_SYMBOL(l2cap_unregister_user);
1655 
1656 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1657 {
1658 	struct l2cap_user *user;
1659 
1660 	while (!list_empty(&conn->users)) {
1661 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1662 		list_del_init(&user->list);
1663 		user->remove(conn, user);
1664 	}
1665 }
1666 
1667 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1668 {
1669 	struct l2cap_conn *conn = hcon->l2cap_data;
1670 	struct l2cap_chan *chan, *l;
1671 
1672 	if (!conn)
1673 		return;
1674 
1675 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1676 
1677 	kfree_skb(conn->rx_skb);
1678 
1679 	skb_queue_purge(&conn->pending_rx);
1680 
1681 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1682 	 * might block if we are running on a worker from the same workqueue
1683 	 * pending_rx_work is waiting on.
1684 	 */
1685 	if (work_pending(&conn->pending_rx_work))
1686 		cancel_work_sync(&conn->pending_rx_work);
1687 
1688 	if (work_pending(&conn->id_addr_update_work))
1689 		cancel_work_sync(&conn->id_addr_update_work);
1690 
1691 	l2cap_unregister_all_users(conn);
1692 
1693 	/* Force the connection to be immediately dropped */
1694 	hcon->disc_timeout = 0;
1695 
1696 	mutex_lock(&conn->chan_lock);
1697 
1698 	/* Kill channels */
1699 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1700 		l2cap_chan_hold(chan);
1701 		l2cap_chan_lock(chan);
1702 
1703 		l2cap_chan_del(chan, err);
1704 
1705 		l2cap_chan_unlock(chan);
1706 
1707 		chan->ops->close(chan);
1708 		l2cap_chan_put(chan);
1709 	}
1710 
1711 	mutex_unlock(&conn->chan_lock);
1712 
1713 	hci_chan_del(conn->hchan);
1714 
1715 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1716 		cancel_delayed_work_sync(&conn->info_timer);
1717 
1718 	hcon->l2cap_data = NULL;
1719 	conn->hchan = NULL;
1720 	l2cap_conn_put(conn);
1721 }
1722 
1723 static void l2cap_conn_free(struct kref *ref)
1724 {
1725 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1726 
1727 	hci_conn_put(conn->hcon);
1728 	kfree(conn);
1729 }
1730 
1731 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1732 {
1733 	kref_get(&conn->ref);
1734 	return conn;
1735 }
1736 EXPORT_SYMBOL(l2cap_conn_get);
1737 
1738 void l2cap_conn_put(struct l2cap_conn *conn)
1739 {
1740 	kref_put(&conn->ref, l2cap_conn_free);
1741 }
1742 EXPORT_SYMBOL(l2cap_conn_put);
1743 
1744 /* ---- Socket interface ---- */
1745 
1746 /* Find socket with psm and source / destination bdaddr.
1747  * Returns closest match.
1748  */
1749 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1750 						   bdaddr_t *src,
1751 						   bdaddr_t *dst,
1752 						   u8 link_type)
1753 {
1754 	struct l2cap_chan *c, *c1 = NULL;
1755 
1756 	read_lock(&chan_list_lock);
1757 
1758 	list_for_each_entry(c, &chan_list, global_l) {
1759 		if (state && c->state != state)
1760 			continue;
1761 
1762 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1763 			continue;
1764 
1765 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1766 			continue;
1767 
1768 		if (c->psm == psm) {
1769 			int src_match, dst_match;
1770 			int src_any, dst_any;
1771 
1772 			/* Exact match. */
1773 			src_match = !bacmp(&c->src, src);
1774 			dst_match = !bacmp(&c->dst, dst);
1775 			if (src_match && dst_match) {
1776 				l2cap_chan_hold(c);
1777 				read_unlock(&chan_list_lock);
1778 				return c;
1779 			}
1780 
1781 			/* Closest match */
1782 			src_any = !bacmp(&c->src, BDADDR_ANY);
1783 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1784 			if ((src_match && dst_any) || (src_any && dst_match) ||
1785 			    (src_any && dst_any))
1786 				c1 = c;
1787 		}
1788 	}
1789 
1790 	if (c1)
1791 		l2cap_chan_hold(c1);
1792 
1793 	read_unlock(&chan_list_lock);
1794 
1795 	return c1;
1796 }
1797 
1798 static void l2cap_monitor_timeout(struct work_struct *work)
1799 {
1800 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1801 					       monitor_timer.work);
1802 
1803 	BT_DBG("chan %p", chan);
1804 
1805 	l2cap_chan_lock(chan);
1806 
1807 	if (!chan->conn) {
1808 		l2cap_chan_unlock(chan);
1809 		l2cap_chan_put(chan);
1810 		return;
1811 	}
1812 
1813 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1814 
1815 	l2cap_chan_unlock(chan);
1816 	l2cap_chan_put(chan);
1817 }
1818 
1819 static void l2cap_retrans_timeout(struct work_struct *work)
1820 {
1821 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1822 					       retrans_timer.work);
1823 
1824 	BT_DBG("chan %p", chan);
1825 
1826 	l2cap_chan_lock(chan);
1827 
1828 	if (!chan->conn) {
1829 		l2cap_chan_unlock(chan);
1830 		l2cap_chan_put(chan);
1831 		return;
1832 	}
1833 
1834 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1835 	l2cap_chan_unlock(chan);
1836 	l2cap_chan_put(chan);
1837 }
1838 
1839 static void l2cap_streaming_send(struct l2cap_chan *chan,
1840 				 struct sk_buff_head *skbs)
1841 {
1842 	struct sk_buff *skb;
1843 	struct l2cap_ctrl *control;
1844 
1845 	BT_DBG("chan %p, skbs %p", chan, skbs);
1846 
1847 	if (__chan_is_moving(chan))
1848 		return;
1849 
1850 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1851 
1852 	while (!skb_queue_empty(&chan->tx_q)) {
1853 
1854 		skb = skb_dequeue(&chan->tx_q);
1855 
1856 		bt_cb(skb)->l2cap.retries = 1;
1857 		control = &bt_cb(skb)->l2cap;
1858 
1859 		control->reqseq = 0;
1860 		control->txseq = chan->next_tx_seq;
1861 
1862 		__pack_control(chan, control, skb);
1863 
1864 		if (chan->fcs == L2CAP_FCS_CRC16) {
1865 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1866 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1867 		}
1868 
1869 		l2cap_do_send(chan, skb);
1870 
1871 		BT_DBG("Sent txseq %u", control->txseq);
1872 
1873 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1874 		chan->frames_sent++;
1875 	}
1876 }
1877 
1878 static int l2cap_ertm_send(struct l2cap_chan *chan)
1879 {
1880 	struct sk_buff *skb, *tx_skb;
1881 	struct l2cap_ctrl *control;
1882 	int sent = 0;
1883 
1884 	BT_DBG("chan %p", chan);
1885 
1886 	if (chan->state != BT_CONNECTED)
1887 		return -ENOTCONN;
1888 
1889 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1890 		return 0;
1891 
1892 	if (__chan_is_moving(chan))
1893 		return 0;
1894 
1895 	while (chan->tx_send_head &&
1896 	       chan->unacked_frames < chan->remote_tx_win &&
1897 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
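		/* Transmit while data is queued, the remote's receive
		 * window still has room and no poll/final exchange is
		 * pending (i.e. the channel is still in the XMIT state).
		 */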
1898 
1899 		skb = chan->tx_send_head;
1900 
1901 		bt_cb(skb)->l2cap.retries = 1;
1902 		control = &bt_cb(skb)->l2cap;
1903 
1904 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1905 			control->final = 1;
1906 
1907 		control->reqseq = chan->buffer_seq;
1908 		chan->last_acked_seq = chan->buffer_seq;
1909 		control->txseq = chan->next_tx_seq;
1910 
1911 		__pack_control(chan, control, skb);
1912 
1913 		if (chan->fcs == L2CAP_FCS_CRC16) {
1914 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1915 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1916 		}
1917 
1918 		/* Clone after data has been modified. Data is assumed to be
1919 		 * read-only (for locking purposes) on cloned sk_buffs.
1920 		 */
1921 		tx_skb = skb_clone(skb, GFP_KERNEL);
1922 
1923 		if (!tx_skb)
1924 			break;
1925 
1926 		__set_retrans_timer(chan);
1927 
1928 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 		chan->unacked_frames++;
1930 		chan->frames_sent++;
1931 		sent++;
1932 
1933 		if (skb_queue_is_last(&chan->tx_q, skb))
1934 			chan->tx_send_head = NULL;
1935 		else
1936 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1937 
1938 		l2cap_do_send(chan, tx_skb);
1939 		BT_DBG("Sent txseq %u", control->txseq);
1940 	}
1941 
1942 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1943 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1944 
1945 	return sent;
1946 }
1947 
1948 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1949 {
1950 	struct l2cap_ctrl control;
1951 	struct sk_buff *skb;
1952 	struct sk_buff *tx_skb;
1953 	u16 seq;
1954 
1955 	BT_DBG("chan %p", chan);
1956 
1957 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1958 		return;
1959 
1960 	if (__chan_is_moving(chan))
1961 		return;
1962 
1963 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1964 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1965 
1966 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1967 		if (!skb) {
1968 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1969 			       seq);
1970 			continue;
1971 		}
1972 
1973 		bt_cb(skb)->l2cap.retries++;
1974 		control = bt_cb(skb)->l2cap;
1975 
1976 		if (chan->max_tx != 0 &&
1977 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
1978 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1979 			l2cap_send_disconn_req(chan, ECONNRESET);
1980 			l2cap_seq_list_clear(&chan->retrans_list);
1981 			break;
1982 		}
1983 
1984 		control.reqseq = chan->buffer_seq;
1985 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1986 			control.final = 1;
1987 		else
1988 			control.final = 0;
1989 
1990 		if (skb_cloned(skb)) {
1991 			/* Cloned sk_buffs are read-only, so we need a
1992 			 * writeable copy
1993 			 */
1994 			tx_skb = skb_copy(skb, GFP_KERNEL);
1995 		} else {
1996 			tx_skb = skb_clone(skb, GFP_KERNEL);
1997 		}
1998 
1999 		if (!tx_skb) {
2000 			l2cap_seq_list_clear(&chan->retrans_list);
2001 			break;
2002 		}
2003 
2004 		/* Update skb contents */
2005 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2006 			put_unaligned_le32(__pack_extended_control(&control),
2007 					   tx_skb->data + L2CAP_HDR_SIZE);
2008 		} else {
2009 			put_unaligned_le16(__pack_enhanced_control(&control),
2010 					   tx_skb->data + L2CAP_HDR_SIZE);
2011 		}
2012 
2013 		/* Update FCS */
2014 		if (chan->fcs == L2CAP_FCS_CRC16) {
2015 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2016 					tx_skb->len - L2CAP_FCS_SIZE);
2017 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2018 						L2CAP_FCS_SIZE);
2019 		}
2020 
2021 		l2cap_do_send(chan, tx_skb);
2022 
2023 		BT_DBG("Resent txseq %d", control.txseq);
2024 
2025 		chan->last_acked_seq = chan->buffer_seq;
2026 	}
2027 }
2028 
2029 static void l2cap_retransmit(struct l2cap_chan *chan,
2030 			     struct l2cap_ctrl *control)
2031 {
2032 	BT_DBG("chan %p, control %p", chan, control);
2033 
2034 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2035 	l2cap_ertm_resend(chan);
2036 }
2037 
2038 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2039 				 struct l2cap_ctrl *control)
2040 {
2041 	struct sk_buff *skb;
2042 
2043 	BT_DBG("chan %p, control %p", chan, control);
2044 
2045 	if (control->poll)
2046 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2047 
2048 	l2cap_seq_list_clear(&chan->retrans_list);
2049 
2050 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2051 		return;
2052 
2053 	if (chan->unacked_frames) {
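		/* Walk to the frame matching reqseq (or the first unsent
		 * frame), then queue every unacked frame from there up to
		 * tx_send_head for retransmission.
		 */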
2054 		skb_queue_walk(&chan->tx_q, skb) {
2055 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2056 			    skb == chan->tx_send_head)
2057 				break;
2058 		}
2059 
2060 		skb_queue_walk_from(&chan->tx_q, skb) {
2061 			if (skb == chan->tx_send_head)
2062 				break;
2063 
2064 			l2cap_seq_list_append(&chan->retrans_list,
2065 					      bt_cb(skb)->l2cap.txseq);
2066 		}
2067 
2068 		l2cap_ertm_resend(chan);
2069 	}
2070 }
2071 
2072 static void l2cap_send_ack(struct l2cap_chan *chan)
2073 {
2074 	struct l2cap_ctrl control;
2075 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2076 					 chan->last_acked_seq);
2077 	int threshold;
2078 
2079 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2080 	       chan, chan->last_acked_seq, chan->buffer_seq);
2081 
2082 	memset(&control, 0, sizeof(control));
2083 	control.sframe = 1;
2084 
2085 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2086 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2087 		__clear_ack_timer(chan);
2088 		control.super = L2CAP_SUPER_RNR;
2089 		control.reqseq = chan->buffer_seq;
2090 		l2cap_send_sframe(chan, &control);
2091 	} else {
2092 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2093 			l2cap_ertm_send(chan);
2094 			/* If any i-frames were sent, they included an ack */
2095 			if (chan->buffer_seq == chan->last_acked_seq)
2096 				frames_to_ack = 0;
2097 		}
2098 
2099 		/* Ack now if the window is 3/4ths full.
2100 		 * Calculate without mul or div
2101 		 */
2102 		threshold = chan->ack_win;
2103 		threshold += threshold << 1;
2104 		threshold >>= 2;
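		/* e.g. with an assumed ack_win of 63: 63 + 126 = 189 and
		 * 189 >> 2 = 47, i.e. roughly 3/4 of the window.
		 */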
2105 
2106 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2107 		       threshold);
2108 
2109 		if (frames_to_ack >= threshold) {
2110 			__clear_ack_timer(chan);
2111 			control.super = L2CAP_SUPER_RR;
2112 			control.reqseq = chan->buffer_seq;
2113 			l2cap_send_sframe(chan, &control);
2114 			frames_to_ack = 0;
2115 		}
2116 
2117 		if (frames_to_ack)
2118 			__set_ack_timer(chan);
2119 	}
2120 }
2121 
2122 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2123 					 struct msghdr *msg, int len,
2124 					 int count, struct sk_buff *skb)
2125 {
2126 	struct l2cap_conn *conn = chan->conn;
2127 	struct sk_buff **frag;
2128 	int sent = 0;
2129 
2130 	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2131 		return -EFAULT;
2132 
2133 	sent += count;
2134 	len  -= count;
2135 
2136 	/* Continuation fragments (no L2CAP header) */
2137 	frag = &skb_shinfo(skb)->frag_list;
2138 	while (len) {
2139 		struct sk_buff *tmp;
2140 
2141 		count = min_t(unsigned int, conn->mtu, len);
2142 
2143 		tmp = chan->ops->alloc_skb(chan, 0, count,
2144 					   msg->msg_flags & MSG_DONTWAIT);
2145 		if (IS_ERR(tmp))
2146 			return PTR_ERR(tmp);
2147 
2148 		*frag = tmp;
2149 
2150 		if (copy_from_iter(skb_put(*frag, count), count,
2151 				   &msg->msg_iter) != count)
2152 			return -EFAULT;
2153 
2154 		sent += count;
2155 		len  -= count;
2156 
2157 		skb->len += (*frag)->len;
2158 		skb->data_len += (*frag)->len;
2159 
2160 		frag = &(*frag)->next;
2161 	}
2162 
2163 	return sent;
2164 }
2165 
2166 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2167 						 struct msghdr *msg, size_t len)
2168 {
2169 	struct l2cap_conn *conn = chan->conn;
2170 	struct sk_buff *skb;
2171 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2172 	struct l2cap_hdr *lh;
2173 
2174 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2175 	       __le16_to_cpu(chan->psm), len);
2176 
2177 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2178 
2179 	skb = chan->ops->alloc_skb(chan, hlen, count,
2180 				   msg->msg_flags & MSG_DONTWAIT);
2181 	if (IS_ERR(skb))
2182 		return skb;
2183 
2184 	/* Create L2CAP header */
2185 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2186 	lh->cid = cpu_to_le16(chan->dcid);
2187 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2188 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2189 
2190 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2191 	if (unlikely(err < 0)) {
2192 		kfree_skb(skb);
2193 		return ERR_PTR(err);
2194 	}
2195 	return skb;
2196 }
2197 
2198 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2199 					      struct msghdr *msg, size_t len)
2200 {
2201 	struct l2cap_conn *conn = chan->conn;
2202 	struct sk_buff *skb;
2203 	int err, count;
2204 	struct l2cap_hdr *lh;
2205 
2206 	BT_DBG("chan %p len %zu", chan, len);
2207 
2208 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2209 
2210 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2211 				   msg->msg_flags & MSG_DONTWAIT);
2212 	if (IS_ERR(skb))
2213 		return skb;
2214 
2215 	/* Create L2CAP header */
2216 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2217 	lh->cid = cpu_to_le16(chan->dcid);
2218 	lh->len = cpu_to_le16(len);
2219 
2220 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 	if (unlikely(err < 0)) {
2222 		kfree_skb(skb);
2223 		return ERR_PTR(err);
2224 	}
2225 	return skb;
2226 }
2227 
2228 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2229 					       struct msghdr *msg, size_t len,
2230 					       u16 sdulen)
2231 {
2232 	struct l2cap_conn *conn = chan->conn;
2233 	struct sk_buff *skb;
2234 	int err, count, hlen;
2235 	struct l2cap_hdr *lh;
2236 
2237 	BT_DBG("chan %p len %zu", chan, len);
2238 
2239 	if (!conn)
2240 		return ERR_PTR(-ENOTCONN);
2241 
2242 	hlen = __ertm_hdr_size(chan);
2243 
2244 	if (sdulen)
2245 		hlen += L2CAP_SDULEN_SIZE;
2246 
2247 	if (chan->fcs == L2CAP_FCS_CRC16)
2248 		hlen += L2CAP_FCS_SIZE;
2249 
2250 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2251 
2252 	skb = chan->ops->alloc_skb(chan, hlen, count,
2253 				   msg->msg_flags & MSG_DONTWAIT);
2254 	if (IS_ERR(skb))
2255 		return skb;
2256 
2257 	/* Create L2CAP header */
2258 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2259 	lh->cid = cpu_to_le16(chan->dcid);
2260 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2261 
2262 	/* Control header is populated later */
2263 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2264 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2265 	else
2266 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2267 
2268 	if (sdulen)
2269 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2270 
2271 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 	if (unlikely(err < 0)) {
2273 		kfree_skb(skb);
2274 		return ERR_PTR(err);
2275 	}
2276 
2277 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2278 	bt_cb(skb)->l2cap.retries = 0;
2279 	return skb;
2280 }
2281 
2282 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2283 			     struct sk_buff_head *seg_queue,
2284 			     struct msghdr *msg, size_t len)
2285 {
2286 	struct sk_buff *skb;
2287 	u16 sdu_len;
2288 	size_t pdu_len;
2289 	u8 sar;
2290 
2291 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2292 
2293 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2294 	 * so fragmented skbs are not used.  The HCI layer's handling
2295 	 * of fragmented skbs is not compatible with ERTM's queueing.
2296 	 */
2297 
2298 	/* PDU size is derived from the HCI MTU */
2299 	pdu_len = chan->conn->mtu;
2300 
2301 	/* Constrain PDU size for BR/EDR connections */
2302 	if (!chan->hs_hcon)
2303 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2304 
2305 	/* Adjust for largest possible L2CAP overhead. */
2306 	if (chan->fcs)
2307 		pdu_len -= L2CAP_FCS_SIZE;
2308 
2309 	pdu_len -= __ertm_hdr_size(chan);
2310 
2311 	/* Remote device may have requested smaller PDUs */
2312 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2313 
2314 	if (len <= pdu_len) {
2315 		sar = L2CAP_SAR_UNSEGMENTED;
2316 		sdu_len = 0;
2317 		pdu_len = len;
2318 	} else {
2319 		sar = L2CAP_SAR_START;
2320 		sdu_len = len;
2321 	}
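	/* SAR bookkeeping: a single-PDU SDU is sent unsegmented; otherwise
	 * the first PDU is a START frame carrying the total SDU length,
	 * middle PDUs are CONTINUE and the final one is END (set below).
	 */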
2322 
2323 	while (len > 0) {
2324 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2325 
2326 		if (IS_ERR(skb)) {
2327 			__skb_queue_purge(seg_queue);
2328 			return PTR_ERR(skb);
2329 		}
2330 
2331 		bt_cb(skb)->l2cap.sar = sar;
2332 		__skb_queue_tail(seg_queue, skb);
2333 
2334 		len -= pdu_len;
2335 		if (sdu_len)
2336 			sdu_len = 0;
2337 
2338 		if (len <= pdu_len) {
2339 			sar = L2CAP_SAR_END;
2340 			pdu_len = len;
2341 		} else {
2342 			sar = L2CAP_SAR_CONTINUE;
2343 		}
2344 	}
2345 
2346 	return 0;
2347 }
2348 
2349 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2350 						   struct msghdr *msg,
2351 						   size_t len, u16 sdulen)
2352 {
2353 	struct l2cap_conn *conn = chan->conn;
2354 	struct sk_buff *skb;
2355 	int err, count, hlen;
2356 	struct l2cap_hdr *lh;
2357 
2358 	BT_DBG("chan %p len %zu", chan, len);
2359 
2360 	if (!conn)
2361 		return ERR_PTR(-ENOTCONN);
2362 
2363 	hlen = L2CAP_HDR_SIZE;
2364 
2365 	if (sdulen)
2366 		hlen += L2CAP_SDULEN_SIZE;
2367 
2368 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2369 
2370 	skb = chan->ops->alloc_skb(chan, hlen, count,
2371 				   msg->msg_flags & MSG_DONTWAIT);
2372 	if (IS_ERR(skb))
2373 		return skb;
2374 
2375 	/* Create L2CAP header */
2376 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2377 	lh->cid = cpu_to_le16(chan->dcid);
2378 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2379 
2380 	if (sdulen)
2381 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2382 
2383 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 	if (unlikely(err < 0)) {
2385 		kfree_skb(skb);
2386 		return ERR_PTR(err);
2387 	}
2388 
2389 	return skb;
2390 }
2391 
2392 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2393 				struct sk_buff_head *seg_queue,
2394 				struct msghdr *msg, size_t len)
2395 {
2396 	struct sk_buff *skb;
2397 	size_t pdu_len;
2398 	u16 sdu_len;
2399 
2400 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2401 
2402 	sdu_len = len;
2403 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
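	/* Only the first PDU carries the 2-byte SDU length field, so its
	 * payload is limited to MPS - L2CAP_SDULEN_SIZE; once it has been
	 * queued, pdu_len is bumped back up to the full remote MPS below.
	 */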
2404 
2405 	while (len > 0) {
2406 		if (len <= pdu_len)
2407 			pdu_len = len;
2408 
2409 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2410 		if (IS_ERR(skb)) {
2411 			__skb_queue_purge(seg_queue);
2412 			return PTR_ERR(skb);
2413 		}
2414 
2415 		__skb_queue_tail(seg_queue, skb);
2416 
2417 		len -= pdu_len;
2418 
2419 		if (sdu_len) {
2420 			sdu_len = 0;
2421 			pdu_len += L2CAP_SDULEN_SIZE;
2422 		}
2423 	}
2424 
2425 	return 0;
2426 }
2427 
2428 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2429 {
2430 	struct sk_buff *skb;
2431 	int err;
2432 	struct sk_buff_head seg_queue;
2433 
2434 	if (!chan->conn)
2435 		return -ENOTCONN;
2436 
2437 	/* Connectionless channel */
2438 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2439 		skb = l2cap_create_connless_pdu(chan, msg, len);
2440 		if (IS_ERR(skb))
2441 			return PTR_ERR(skb);
2442 
2443 		/* Channel lock is released before requesting new skb and then
2444 		 * reacquired, so we need to recheck the channel state.
2445 		 */
2446 		if (chan->state != BT_CONNECTED) {
2447 			kfree_skb(skb);
2448 			return -ENOTCONN;
2449 		}
2450 
2451 		l2cap_do_send(chan, skb);
2452 		return len;
2453 	}
2454 
2455 	switch (chan->mode) {
2456 	case L2CAP_MODE_LE_FLOWCTL:
2457 		/* Check outgoing MTU */
2458 		if (len > chan->omtu)
2459 			return -EMSGSIZE;
2460 
2461 		if (!chan->tx_credits)
2462 			return -EAGAIN;
2463 
2464 		__skb_queue_head_init(&seg_queue);
2465 
2466 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2467 
2468 		if (chan->state != BT_CONNECTED) {
2469 			__skb_queue_purge(&seg_queue);
2470 			err = -ENOTCONN;
2471 		}
2472 
2473 		if (err)
2474 			return err;
2475 
2476 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2477 
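		/* Credit-based flow control: each PDU sent consumes one
		 * credit, and the channel is suspended once the credits
		 * run out until the peer grants more.
		 */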
2478 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2479 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2480 			chan->tx_credits--;
2481 		}
2482 
2483 		if (!chan->tx_credits)
2484 			chan->ops->suspend(chan);
2485 
2486 		err = len;
2487 
2488 		break;
2489 
2490 	case L2CAP_MODE_BASIC:
2491 		/* Check outgoing MTU */
2492 		if (len > chan->omtu)
2493 			return -EMSGSIZE;
2494 
2495 		/* Create a basic PDU */
2496 		skb = l2cap_create_basic_pdu(chan, msg, len);
2497 		if (IS_ERR(skb))
2498 			return PTR_ERR(skb);
2499 
2500 		/* Channel lock is released before requesting new skb and then
2501 		 * reacquired, so we need to recheck the channel state.
2502 		 */
2503 		if (chan->state != BT_CONNECTED) {
2504 			kfree_skb(skb);
2505 			return -ENOTCONN;
2506 		}
2507 
2508 		l2cap_do_send(chan, skb);
2509 		err = len;
2510 		break;
2511 
2512 	case L2CAP_MODE_ERTM:
2513 	case L2CAP_MODE_STREAMING:
2514 		/* Check outgoing MTU */
2515 		if (len > chan->omtu) {
2516 			err = -EMSGSIZE;
2517 			break;
2518 		}
2519 
2520 		__skb_queue_head_init(&seg_queue);
2521 
2522 		/* Do segmentation before calling in to the state machine,
2523 		 * since it's possible to block while waiting for memory
2524 		 * allocation.
2525 		 */
2526 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2527 
2528 		/* The channel could have been closed while segmenting,
2529 		 * check that it is still connected.
2530 		 */
2531 		if (chan->state != BT_CONNECTED) {
2532 			__skb_queue_purge(&seg_queue);
2533 			err = -ENOTCONN;
2534 		}
2535 
2536 		if (err)
2537 			break;
2538 
2539 		if (chan->mode == L2CAP_MODE_ERTM)
2540 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2541 		else
2542 			l2cap_streaming_send(chan, &seg_queue);
2543 
2544 		err = len;
2545 
2546 		/* If the skbs were not queued for sending, they'll still be in
2547 		 * seg_queue and need to be purged.
2548 		 */
2549 		__skb_queue_purge(&seg_queue);
2550 		break;
2551 
2552 	default:
2553 		BT_DBG("bad mode %1.1x", chan->mode);
2554 		err = -EBADFD;
2555 	}
2556 
2557 	return err;
2558 }
2559 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2560 
2561 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2562 {
2563 	struct l2cap_ctrl control;
2564 	u16 seq;
2565 
2566 	BT_DBG("chan %p, txseq %u", chan, txseq);
2567 
2568 	memset(&control, 0, sizeof(control));
2569 	control.sframe = 1;
2570 	control.super = L2CAP_SUPER_SREJ;
2571 
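	/* Request retransmission of every missing frame between the next
	 * expected txseq and the out-of-sequence frame just received,
	 * skipping any that are already buffered in srej_q.
	 */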
2572 	for (seq = chan->expected_tx_seq; seq != txseq;
2573 	     seq = __next_seq(chan, seq)) {
2574 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2575 			control.reqseq = seq;
2576 			l2cap_send_sframe(chan, &control);
2577 			l2cap_seq_list_append(&chan->srej_list, seq);
2578 		}
2579 	}
2580 
2581 	chan->expected_tx_seq = __next_seq(chan, txseq);
2582 }
2583 
2584 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2585 {
2586 	struct l2cap_ctrl control;
2587 
2588 	BT_DBG("chan %p", chan);
2589 
2590 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2591 		return;
2592 
2593 	memset(&control, 0, sizeof(control));
2594 	control.sframe = 1;
2595 	control.super = L2CAP_SUPER_SREJ;
2596 	control.reqseq = chan->srej_list.tail;
2597 	l2cap_send_sframe(chan, &control);
2598 }
2599 
2600 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2601 {
2602 	struct l2cap_ctrl control;
2603 	u16 initial_head;
2604 	u16 seq;
2605 
2606 	BT_DBG("chan %p, txseq %u", chan, txseq);
2607 
2608 	memset(&control, 0, sizeof(control));
2609 	control.sframe = 1;
2610 	control.super = L2CAP_SUPER_SREJ;
2611 
2612 	/* Capture initial list head to allow only one pass through the list. */
2613 	initial_head = chan->srej_list.head;
2614 
2615 	do {
2616 		seq = l2cap_seq_list_pop(&chan->srej_list);
2617 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2618 			break;
2619 
2620 		control.reqseq = seq;
2621 		l2cap_send_sframe(chan, &control);
2622 		l2cap_seq_list_append(&chan->srej_list, seq);
2623 	} while (chan->srej_list.head != initial_head);
2624 }
2625 
2626 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2627 {
2628 	struct sk_buff *acked_skb;
2629 	u16 ackseq;
2630 
2631 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2632 
2633 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2634 		return;
2635 
2636 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2637 	       chan->expected_ack_seq, chan->unacked_frames);
2638 
2639 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2640 	     ackseq = __next_seq(chan, ackseq)) {
2641 
2642 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2643 		if (acked_skb) {
2644 			skb_unlink(acked_skb, &chan->tx_q);
2645 			kfree_skb(acked_skb);
2646 			chan->unacked_frames--;
2647 		}
2648 	}
2649 
2650 	chan->expected_ack_seq = reqseq;
2651 
2652 	if (chan->unacked_frames == 0)
2653 		__clear_retrans_timer(chan);
2654 
2655 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2656 }
2657 
2658 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2659 {
2660 	BT_DBG("chan %p", chan);
2661 
2662 	chan->expected_tx_seq = chan->buffer_seq;
2663 	l2cap_seq_list_clear(&chan->srej_list);
2664 	skb_queue_purge(&chan->srej_q);
2665 	chan->rx_state = L2CAP_RX_STATE_RECV;
2666 }
2667 
2668 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2669 				struct l2cap_ctrl *control,
2670 				struct sk_buff_head *skbs, u8 event)
2671 {
2672 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2673 	       event);
2674 
2675 	switch (event) {
2676 	case L2CAP_EV_DATA_REQUEST:
2677 		if (chan->tx_send_head == NULL)
2678 			chan->tx_send_head = skb_peek(skbs);
2679 
2680 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2681 		l2cap_ertm_send(chan);
2682 		break;
2683 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2684 		BT_DBG("Enter LOCAL_BUSY");
2685 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2686 
2687 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2688 			/* The SREJ_SENT state must be aborted if we are to
2689 			 * enter the LOCAL_BUSY state.
2690 			 */
2691 			l2cap_abort_rx_srej_sent(chan);
2692 		}
2693 
2694 		l2cap_send_ack(chan);
2695 
2696 		break;
2697 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2698 		BT_DBG("Exit LOCAL_BUSY");
2699 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2700 
2701 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2702 			struct l2cap_ctrl local_control;
2703 
2704 			memset(&local_control, 0, sizeof(local_control));
2705 			local_control.sframe = 1;
2706 			local_control.super = L2CAP_SUPER_RR;
2707 			local_control.poll = 1;
2708 			local_control.reqseq = chan->buffer_seq;
2709 			l2cap_send_sframe(chan, &local_control);
2710 
2711 			chan->retry_count = 1;
2712 			__set_monitor_timer(chan);
2713 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2714 		}
2715 		break;
2716 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2717 		l2cap_process_reqseq(chan, control->reqseq);
2718 		break;
2719 	case L2CAP_EV_EXPLICIT_POLL:
2720 		l2cap_send_rr_or_rnr(chan, 1);
2721 		chan->retry_count = 1;
2722 		__set_monitor_timer(chan);
2723 		__clear_ack_timer(chan);
2724 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2725 		break;
2726 	case L2CAP_EV_RETRANS_TO:
2727 		l2cap_send_rr_or_rnr(chan, 1);
2728 		chan->retry_count = 1;
2729 		__set_monitor_timer(chan);
2730 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2731 		break;
2732 	case L2CAP_EV_RECV_FBIT:
2733 		/* Nothing to process */
2734 		break;
2735 	default:
2736 		break;
2737 	}
2738 }
2739 
2740 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2741 				  struct l2cap_ctrl *control,
2742 				  struct sk_buff_head *skbs, u8 event)
2743 {
2744 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2745 	       event);
2746 
2747 	switch (event) {
2748 	case L2CAP_EV_DATA_REQUEST:
2749 		if (chan->tx_send_head == NULL)
2750 			chan->tx_send_head = skb_peek(skbs);
2751 		/* Queue data, but don't send. */
2752 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2753 		break;
2754 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2755 		BT_DBG("Enter LOCAL_BUSY");
2756 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2757 
2758 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2759 			/* The SREJ_SENT state must be aborted if we are to
2760 			 * enter the LOCAL_BUSY state.
2761 			 */
2762 			l2cap_abort_rx_srej_sent(chan);
2763 		}
2764 
2765 		l2cap_send_ack(chan);
2766 
2767 		break;
2768 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2769 		BT_DBG("Exit LOCAL_BUSY");
2770 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2771 
2772 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2773 			struct l2cap_ctrl local_control;
2774 			memset(&local_control, 0, sizeof(local_control));
2775 			local_control.sframe = 1;
2776 			local_control.super = L2CAP_SUPER_RR;
2777 			local_control.poll = 1;
2778 			local_control.reqseq = chan->buffer_seq;
2779 			l2cap_send_sframe(chan, &local_control);
2780 
2781 			chan->retry_count = 1;
2782 			__set_monitor_timer(chan);
2783 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2784 		}
2785 		break;
2786 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2787 		l2cap_process_reqseq(chan, control->reqseq);
2788 
2789 		/* Fall through */
2790 
2791 	case L2CAP_EV_RECV_FBIT:
2792 		if (control && control->final) {
2793 			__clear_monitor_timer(chan);
2794 			if (chan->unacked_frames > 0)
2795 				__set_retrans_timer(chan);
2796 			chan->retry_count = 0;
2797 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2798 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2799 		}
2800 		break;
2801 	case L2CAP_EV_EXPLICIT_POLL:
2802 		/* Ignore */
2803 		break;
2804 	case L2CAP_EV_MONITOR_TO:
2805 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2806 			l2cap_send_rr_or_rnr(chan, 1);
2807 			__set_monitor_timer(chan);
2808 			chan->retry_count++;
2809 		} else {
2810 			l2cap_send_disconn_req(chan, ECONNABORTED);
2811 		}
2812 		break;
2813 	default:
2814 		break;
2815 	}
2816 }
2817 
2818 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2819 		     struct sk_buff_head *skbs, u8 event)
2820 {
2821 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2822 	       chan, control, skbs, event, chan->tx_state);
2823 
2824 	switch (chan->tx_state) {
2825 	case L2CAP_TX_STATE_XMIT:
2826 		l2cap_tx_state_xmit(chan, control, skbs, event);
2827 		break;
2828 	case L2CAP_TX_STATE_WAIT_F:
2829 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2830 		break;
2831 	default:
2832 		/* Ignore event */
2833 		break;
2834 	}
2835 }
2836 
2837 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2838 			     struct l2cap_ctrl *control)
2839 {
2840 	BT_DBG("chan %p, control %p", chan, control);
2841 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2842 }
2843 
2844 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2845 				  struct l2cap_ctrl *control)
2846 {
2847 	BT_DBG("chan %p, control %p", chan, control);
2848 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2849 }
2850 
2851 /* Copy frame to all raw sockets on that connection */
2852 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2853 {
2854 	struct sk_buff *nskb;
2855 	struct l2cap_chan *chan;
2856 
2857 	BT_DBG("conn %p", conn);
2858 
2859 	mutex_lock(&conn->chan_lock);
2860 
2861 	list_for_each_entry(chan, &conn->chan_l, list) {
2862 		if (chan->chan_type != L2CAP_CHAN_RAW)
2863 			continue;
2864 
2865 		/* Don't send frame to the channel it came from */
2866 		if (bt_cb(skb)->l2cap.chan == chan)
2867 			continue;
2868 
2869 		nskb = skb_clone(skb, GFP_KERNEL);
2870 		if (!nskb)
2871 			continue;
2872 		if (chan->ops->recv(chan, nskb))
2873 			kfree_skb(nskb);
2874 	}
2875 
2876 	mutex_unlock(&conn->chan_lock);
2877 }
2878 
2879 /* ---- L2CAP signalling commands ---- */
2880 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2881 				       u8 ident, u16 dlen, void *data)
2882 {
2883 	struct sk_buff *skb, **frag;
2884 	struct l2cap_cmd_hdr *cmd;
2885 	struct l2cap_hdr *lh;
2886 	int len, count;
2887 
2888 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2889 	       conn, code, ident, dlen);
2890 
2891 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2892 		return NULL;
2893 
2894 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2895 	count = min_t(unsigned int, conn->mtu, len);
2896 
2897 	skb = bt_skb_alloc(count, GFP_KERNEL);
2898 	if (!skb)
2899 		return NULL;
2900 
2901 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2902 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2903 
2904 	if (conn->hcon->type == LE_LINK)
2905 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2906 	else
2907 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2908 
2909 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2910 	cmd->code  = code;
2911 	cmd->ident = ident;
2912 	cmd->len   = cpu_to_le16(dlen);
2913 
2914 	if (dlen) {
2915 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2916 		memcpy(skb_put(skb, count), data, count);
2917 		data += count;
2918 	}
2919 
2920 	len -= skb->len;
2921 
2922 	/* Continuation fragments (no L2CAP header) */
2923 	frag = &skb_shinfo(skb)->frag_list;
2924 	while (len) {
2925 		count = min_t(unsigned int, conn->mtu, len);
2926 
2927 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2928 		if (!*frag)
2929 			goto fail;
2930 
2931 		memcpy(skb_put(*frag, count), data, count);
2932 
2933 		len  -= count;
2934 		data += count;
2935 
2936 		frag = &(*frag)->next;
2937 	}
2938 
2939 	return skb;
2940 
2941 fail:
2942 	kfree_skb(skb);
2943 	return NULL;
2944 }
2945 
2946 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2947 				     unsigned long *val)
2948 {
2949 	struct l2cap_conf_opt *opt = *ptr;
2950 	int len;
2951 
2952 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2953 	*ptr += len;
2954 
2955 	*type = opt->type;
2956 	*olen = opt->len;
2957 
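	/* Options are type/length/value triples; 1-, 2- and 4-byte values
	 * are returned by value, anything larger as a pointer to the data.
	 */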
2958 	switch (opt->len) {
2959 	case 1:
2960 		*val = *((u8 *) opt->val);
2961 		break;
2962 
2963 	case 2:
2964 		*val = get_unaligned_le16(opt->val);
2965 		break;
2966 
2967 	case 4:
2968 		*val = get_unaligned_le32(opt->val);
2969 		break;
2970 
2971 	default:
2972 		*val = (unsigned long) opt->val;
2973 		break;
2974 	}
2975 
2976 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2977 	return len;
2978 }
2979 
2980 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
2981 {
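	/* Append a type/length/value option at *ptr and advance the
	 * pointer; an option that would overflow the remaining buffer
	 * (size) is silently dropped.
	 */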
2982 	struct l2cap_conf_opt *opt = *ptr;
2983 
2984 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2985 
2986 	if (size < L2CAP_CONF_OPT_SIZE + len)
2987 		return;
2988 
2989 	opt->type = type;
2990 	opt->len  = len;
2991 
2992 	switch (len) {
2993 	case 1:
2994 		*((u8 *) opt->val)  = val;
2995 		break;
2996 
2997 	case 2:
2998 		put_unaligned_le16(val, opt->val);
2999 		break;
3000 
3001 	case 4:
3002 		put_unaligned_le32(val, opt->val);
3003 		break;
3004 
3005 	default:
3006 		memcpy(opt->val, (void *) val, len);
3007 		break;
3008 	}
3009 
3010 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3011 }
3012 
3013 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3014 {
3015 	struct l2cap_conf_efs efs;
3016 
3017 	switch (chan->mode) {
3018 	case L2CAP_MODE_ERTM:
3019 		efs.id		= chan->local_id;
3020 		efs.stype	= chan->local_stype;
3021 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3022 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3023 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3024 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3025 		break;
3026 
3027 	case L2CAP_MODE_STREAMING:
3028 		efs.id		= 1;
3029 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3030 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3031 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3032 		efs.acc_lat	= 0;
3033 		efs.flush_to	= 0;
3034 		break;
3035 
3036 	default:
3037 		return;
3038 	}
3039 
3040 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3041 			   (unsigned long) &efs, size);
3042 }
3043 
3044 static void l2cap_ack_timeout(struct work_struct *work)
3045 {
3046 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3047 					       ack_timer.work);
3048 	u16 frames_to_ack;
3049 
3050 	BT_DBG("chan %p", chan);
3051 
3052 	l2cap_chan_lock(chan);
3053 
3054 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3055 				     chan->last_acked_seq);
3056 
3057 	if (frames_to_ack)
3058 		l2cap_send_rr_or_rnr(chan, 0);
3059 
3060 	l2cap_chan_unlock(chan);
3061 	l2cap_chan_put(chan);
3062 }
3063 
3064 int l2cap_ertm_init(struct l2cap_chan *chan)
3065 {
3066 	int err;
3067 
3068 	chan->next_tx_seq = 0;
3069 	chan->expected_tx_seq = 0;
3070 	chan->expected_ack_seq = 0;
3071 	chan->unacked_frames = 0;
3072 	chan->buffer_seq = 0;
3073 	chan->frames_sent = 0;
3074 	chan->last_acked_seq = 0;
3075 	chan->sdu = NULL;
3076 	chan->sdu_last_frag = NULL;
3077 	chan->sdu_len = 0;
3078 
3079 	skb_queue_head_init(&chan->tx_q);
3080 
3081 	chan->local_amp_id = AMP_ID_BREDR;
3082 	chan->move_id = AMP_ID_BREDR;
3083 	chan->move_state = L2CAP_MOVE_STABLE;
3084 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3085 
3086 	if (chan->mode != L2CAP_MODE_ERTM)
3087 		return 0;
3088 
3089 	chan->rx_state = L2CAP_RX_STATE_RECV;
3090 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3091 
3092 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3093 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3094 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3095 
3096 	skb_queue_head_init(&chan->srej_q);
3097 
3098 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3099 	if (err < 0)
3100 		return err;
3101 
3102 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3103 	if (err < 0)
3104 		l2cap_seq_list_free(&chan->srej_list);
3105 
3106 	return err;
3107 }
3108 
3109 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3110 {
3111 	switch (mode) {
3112 	case L2CAP_MODE_STREAMING:
3113 	case L2CAP_MODE_ERTM:
3114 		if (l2cap_mode_supported(mode, remote_feat_mask))
3115 			return mode;
3116 		/* fall through */
3117 	default:
3118 		return L2CAP_MODE_BASIC;
3119 	}
3120 }
3121 
3122 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3123 {
3124 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3125 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3126 }
3127 
3128 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3129 {
3130 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3131 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3132 }
3133 
3134 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3135 				      struct l2cap_conf_rfc *rfc)
3136 {
3137 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3138 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3139 
3140 		/* Class 1 devices must have ERTM timeouts
3141 		 * exceeding the Link Supervision Timeout.  The
3142 		 * default Link Supervision Timeout for AMP
3143 		 * controllers is 10 seconds.
3144 		 *
3145 		 * Class 1 devices use 0xffffffff for their
3146 		 * best-effort flush timeout, so the clamping logic
3147 		 * will result in a timeout that meets the above
3148 		 * requirement.  ERTM timeouts are 16-bit values, so
3149 		 * the maximum timeout is 65.535 seconds.
3150 		 */
3151 
3152 		/* Convert timeout to milliseconds and round */
3153 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3154 
3155 		/* This is the recommended formula for class 2 devices
3156 		 * that start ERTM timers when packets are sent to the
3157 		 * controller.
3158 		 */
3159 		ertm_to = 3 * ertm_to + 500;
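		/* e.g. an assumed 100 ms best-effort flush timeout becomes
		 * 3 * 100 + 500 = 800 ms, while the Class 1 value of
		 * 0xffffffff usecs saturates at the 0xffff ms cap below.
		 */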
3160 
3161 		if (ertm_to > 0xffff)
3162 			ertm_to = 0xffff;
3163 
3164 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3165 		rfc->monitor_timeout = rfc->retrans_timeout;
3166 	} else {
3167 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3168 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3169 	}
3170 }
3171 
3172 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3173 {
3174 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3175 	    __l2cap_ews_supported(chan->conn)) {
3176 		/* use extended control field */
3177 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3178 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3179 	} else {
3180 		chan->tx_win = min_t(u16, chan->tx_win,
3181 				     L2CAP_DEFAULT_TX_WINDOW);
3182 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3183 	}
3184 	chan->ack_win = chan->tx_win;
3185 }
3186 
3187 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3188 {
3189 	struct l2cap_conf_req *req = data;
3190 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3191 	void *ptr = req->data;
3192 	void *endptr = data + data_size;
3193 	u16 size;
3194 
3195 	BT_DBG("chan %p", chan);
3196 
3197 	if (chan->num_conf_req || chan->num_conf_rsp)
3198 		goto done;
3199 
3200 	switch (chan->mode) {
3201 	case L2CAP_MODE_STREAMING:
3202 	case L2CAP_MODE_ERTM:
3203 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3204 			break;
3205 
3206 		if (__l2cap_efs_supported(chan->conn))
3207 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3208 
3209 		/* fall through */
3210 	default:
3211 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3212 		break;
3213 	}
3214 
3215 done:
3216 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3217 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3218 
3219 	switch (chan->mode) {
3220 	case L2CAP_MODE_BASIC:
3221 		if (disable_ertm)
3222 			break;
3223 
3224 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3225 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3226 			break;
3227 
3228 		rfc.mode            = L2CAP_MODE_BASIC;
3229 		rfc.txwin_size      = 0;
3230 		rfc.max_transmit    = 0;
3231 		rfc.retrans_timeout = 0;
3232 		rfc.monitor_timeout = 0;
3233 		rfc.max_pdu_size    = 0;
3234 
3235 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3236 				   (unsigned long) &rfc, endptr - ptr);
3237 		break;
3238 
3239 	case L2CAP_MODE_ERTM:
3240 		rfc.mode            = L2CAP_MODE_ERTM;
3241 		rfc.max_transmit    = chan->max_tx;
3242 
3243 		__l2cap_set_ertm_timeouts(chan, &rfc);
3244 
3245 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3246 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3247 			     L2CAP_FCS_SIZE);
3248 		rfc.max_pdu_size = cpu_to_le16(size);
3249 
3250 		l2cap_txwin_setup(chan);
3251 
3252 		rfc.txwin_size = min_t(u16, chan->tx_win,
3253 				       L2CAP_DEFAULT_TX_WINDOW);
3254 
3255 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3256 				   (unsigned long) &rfc, endptr - ptr);
3257 
3258 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3259 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3260 
3261 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3262 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3263 					   chan->tx_win, endptr - ptr);
3264 
3265 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3266 			if (chan->fcs == L2CAP_FCS_NONE ||
3267 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3268 				chan->fcs = L2CAP_FCS_NONE;
3269 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3270 						   chan->fcs, endptr - ptr);
3271 			}
3272 		break;
3273 
3274 	case L2CAP_MODE_STREAMING:
3275 		l2cap_txwin_setup(chan);
3276 		rfc.mode            = L2CAP_MODE_STREAMING;
3277 		rfc.txwin_size      = 0;
3278 		rfc.max_transmit    = 0;
3279 		rfc.retrans_timeout = 0;
3280 		rfc.monitor_timeout = 0;
3281 
3282 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3283 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3284 			     L2CAP_FCS_SIZE);
3285 		rfc.max_pdu_size = cpu_to_le16(size);
3286 
3287 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3288 				   (unsigned long) &rfc, endptr - ptr);
3289 
3290 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3291 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3292 
3293 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3294 			if (chan->fcs == L2CAP_FCS_NONE ||
3295 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3296 				chan->fcs = L2CAP_FCS_NONE;
3297 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3298 						   chan->fcs, endptr - ptr);
3299 			}
3300 		break;
3301 	}
3302 
3303 	req->dcid  = cpu_to_le16(chan->dcid);
3304 	req->flags = cpu_to_le16(0);
3305 
3306 	return ptr - data;
3307 }
3308 
3309 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3310 {
3311 	struct l2cap_conf_rsp *rsp = data;
3312 	void *ptr = rsp->data;
3313 	void *endptr = data + data_size;
3314 	void *req = chan->conf_req;
3315 	int len = chan->conf_len;
3316 	int type, hint, olen;
3317 	unsigned long val;
3318 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3319 	struct l2cap_conf_efs efs;
3320 	u8 remote_efs = 0;
3321 	u16 mtu = L2CAP_DEFAULT_MTU;
3322 	u16 result = L2CAP_CONF_SUCCESS;
3323 	u16 size;
3324 
3325 	BT_DBG("chan %p", chan);
3326 
3327 	while (len >= L2CAP_CONF_OPT_SIZE) {
3328 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3329 
3330 		hint  = type & L2CAP_CONF_HINT;
3331 		type &= L2CAP_CONF_MASK;
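		/* The top bit of the option type marks it as a hint:
		 * unknown hints are simply ignored, while unknown
		 * mandatory options are reported as L2CAP_CONF_UNKNOWN.
		 */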
3332 
3333 		switch (type) {
3334 		case L2CAP_CONF_MTU:
3335 			mtu = val;
3336 			break;
3337 
3338 		case L2CAP_CONF_FLUSH_TO:
3339 			chan->flush_to = val;
3340 			break;
3341 
3342 		case L2CAP_CONF_QOS:
3343 			break;
3344 
3345 		case L2CAP_CONF_RFC:
3346 			if (olen == sizeof(rfc))
3347 				memcpy(&rfc, (void *) val, olen);
3348 			break;
3349 
3350 		case L2CAP_CONF_FCS:
3351 			if (val == L2CAP_FCS_NONE)
3352 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3353 			break;
3354 
3355 		case L2CAP_CONF_EFS:
3356 			if (olen == sizeof(efs)) {
3357 				remote_efs = 1;
3358 				memcpy(&efs, (void *) val, olen);
3359 			}
3360 			break;
3361 
3362 		case L2CAP_CONF_EWS:
3363 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3364 				return -ECONNREFUSED;
3365 
3366 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3367 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3368 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3369 			chan->remote_tx_win = val;
3370 			break;
3371 
3372 		default:
3373 			if (hint)
3374 				break;
3375 
3376 			result = L2CAP_CONF_UNKNOWN;
3377 			*((u8 *) ptr++) = type;
3378 			break;
3379 		}
3380 	}
3381 
3382 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3383 		goto done;
3384 
3385 	switch (chan->mode) {
3386 	case L2CAP_MODE_STREAMING:
3387 	case L2CAP_MODE_ERTM:
3388 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3389 			chan->mode = l2cap_select_mode(rfc.mode,
3390 						       chan->conn->feat_mask);
3391 			break;
3392 		}
3393 
3394 		if (remote_efs) {
3395 			if (__l2cap_efs_supported(chan->conn))
3396 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3397 			else
3398 				return -ECONNREFUSED;
3399 		}
3400 
3401 		if (chan->mode != rfc.mode)
3402 			return -ECONNREFUSED;
3403 
3404 		break;
3405 	}
3406 
3407 done:
3408 	if (chan->mode != rfc.mode) {
3409 		result = L2CAP_CONF_UNACCEPT;
3410 		rfc.mode = chan->mode;
3411 
3412 		if (chan->num_conf_rsp == 1)
3413 			return -ECONNREFUSED;
3414 
3415 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3416 				   (unsigned long) &rfc, endptr - ptr);
3417 	}
3418 
3419 	if (result == L2CAP_CONF_SUCCESS) {
3420 		/* Configure output options and let the other side know
3421 		 * which ones we don't like. */
3422 
3423 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3424 			result = L2CAP_CONF_UNACCEPT;
3425 		else {
3426 			chan->omtu = mtu;
3427 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3428 		}
3429 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3430 
3431 		if (remote_efs) {
3432 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3433 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3434 			    efs.stype != chan->local_stype) {
3435 
3436 				result = L2CAP_CONF_UNACCEPT;
3437 
3438 				if (chan->num_conf_req >= 1)
3439 					return -ECONNREFUSED;
3440 
3441 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3442 						   sizeof(efs),
3443 						   (unsigned long) &efs, endptr - ptr);
3444 			} else {
3445 				/* Send PENDING Conf Rsp */
3446 				result = L2CAP_CONF_PENDING;
3447 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3448 			}
3449 		}
3450 
3451 		switch (rfc.mode) {
3452 		case L2CAP_MODE_BASIC:
3453 			chan->fcs = L2CAP_FCS_NONE;
3454 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3455 			break;
3456 
3457 		case L2CAP_MODE_ERTM:
3458 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3459 				chan->remote_tx_win = rfc.txwin_size;
3460 			else
3461 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3462 
3463 			chan->remote_max_tx = rfc.max_transmit;
3464 
3465 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3466 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3467 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3468 			rfc.max_pdu_size = cpu_to_le16(size);
3469 			chan->remote_mps = size;
3470 
3471 			__l2cap_set_ertm_timeouts(chan, &rfc);
3472 
3473 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3474 
3475 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3476 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3477 
3478 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3479 				chan->remote_id = efs.id;
3480 				chan->remote_stype = efs.stype;
3481 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3482 				chan->remote_flush_to =
3483 					le32_to_cpu(efs.flush_to);
3484 				chan->remote_acc_lat =
3485 					le32_to_cpu(efs.acc_lat);
3486 				chan->remote_sdu_itime =
3487 					le32_to_cpu(efs.sdu_itime);
3488 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3489 						   sizeof(efs),
3490 						   (unsigned long) &efs, endptr - ptr);
3491 			}
3492 			break;
3493 
3494 		case L2CAP_MODE_STREAMING:
3495 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3496 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3497 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3498 			rfc.max_pdu_size = cpu_to_le16(size);
3499 			chan->remote_mps = size;
3500 
3501 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3502 
3503 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3504 					   (unsigned long) &rfc, endptr - ptr);
3505 
3506 			break;
3507 
3508 		default:
3509 			result = L2CAP_CONF_UNACCEPT;
3510 
3511 			memset(&rfc, 0, sizeof(rfc));
3512 			rfc.mode = chan->mode;
3513 		}
3514 
3515 		if (result == L2CAP_CONF_SUCCESS)
3516 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3517 	}
3518 	rsp->scid   = cpu_to_le16(chan->dcid);
3519 	rsp->result = cpu_to_le16(result);
3520 	rsp->flags  = cpu_to_le16(0);
3521 
3522 	return ptr - data;
3523 }
3524 
3525 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3526 				void *data, size_t size, u16 *result)
3527 {
3528 	struct l2cap_conf_req *req = data;
3529 	void *ptr = req->data;
3530 	void *endptr = data + size;
3531 	int type, olen;
3532 	unsigned long val;
3533 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3534 	struct l2cap_conf_efs efs;
3535 
3536 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3537 
3538 	while (len >= L2CAP_CONF_OPT_SIZE) {
3539 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3540 
3541 		switch (type) {
3542 		case L2CAP_CONF_MTU:
3543 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3544 				*result = L2CAP_CONF_UNACCEPT;
3545 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3546 			} else
3547 				chan->imtu = val;
3548 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3549 			break;
3550 
3551 		case L2CAP_CONF_FLUSH_TO:
3552 			chan->flush_to = val;
3553 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3554 					   2, chan->flush_to, endptr - ptr);
3555 			break;
3556 
3557 		case L2CAP_CONF_RFC:
3558 			if (olen == sizeof(rfc))
3559 				memcpy(&rfc, (void *)val, olen);
3560 
3561 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3562 			    rfc.mode != chan->mode)
3563 				return -ECONNREFUSED;
3564 
3565 			chan->fcs = 0;
3566 
3567 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3568 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3569 			break;
3570 
3571 		case L2CAP_CONF_EWS:
3572 			chan->ack_win = min_t(u16, val, chan->ack_win);
3573 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3574 					   chan->tx_win, endptr - ptr);
3575 			break;
3576 
3577 		case L2CAP_CONF_EFS:
3578 			if (olen == sizeof(efs)) {
3579 				memcpy(&efs, (void *)val, olen);
3580 
3581 				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3582 				    efs.stype != L2CAP_SERV_NOTRAFIC &&
3583 				    efs.stype != chan->local_stype)
3584 					return -ECONNREFUSED;
3585 
3586 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3587 						   (unsigned long) &efs, endptr - ptr);
3588 			}
3589 			break;
3590 
3591 		case L2CAP_CONF_FCS:
3592 			if (*result == L2CAP_CONF_PENDING)
3593 				if (val == L2CAP_FCS_NONE)
3594 					set_bit(CONF_RECV_NO_FCS,
3595 						&chan->conf_state);
3596 			break;
3597 		}
3598 	}
3599 
3600 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3601 		return -ECONNREFUSED;
3602 
3603 	chan->mode = rfc.mode;
3604 
3605 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3606 		switch (rfc.mode) {
3607 		case L2CAP_MODE_ERTM:
3608 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3609 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3610 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3611 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3612 				chan->ack_win = min_t(u16, chan->ack_win,
3613 						      rfc.txwin_size);
3614 
3615 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3616 				chan->local_msdu = le16_to_cpu(efs.msdu);
3617 				chan->local_sdu_itime =
3618 					le32_to_cpu(efs.sdu_itime);
3619 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3620 				chan->local_flush_to =
3621 					le32_to_cpu(efs.flush_to);
3622 			}
3623 			break;
3624 
3625 		case L2CAP_MODE_STREAMING:
3626 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3627 		}
3628 	}
3629 
3630 	req->dcid   = cpu_to_le16(chan->dcid);
3631 	req->flags  = cpu_to_le16(0);
3632 
3633 	return ptr - data;
3634 }
3635 
3636 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3637 				u16 result, u16 flags)
3638 {
3639 	struct l2cap_conf_rsp *rsp = data;
3640 	void *ptr = rsp->data;
3641 
3642 	BT_DBG("chan %p", chan);
3643 
3644 	rsp->scid   = cpu_to_le16(chan->dcid);
3645 	rsp->result = cpu_to_le16(result);
3646 	rsp->flags  = cpu_to_le16(flags);
3647 
3648 	return ptr - data;
3649 }
3650 
3651 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3652 {
3653 	struct l2cap_le_conn_rsp rsp;
3654 	struct l2cap_conn *conn = chan->conn;
3655 
3656 	BT_DBG("chan %p", chan);
3657 
3658 	rsp.dcid    = cpu_to_le16(chan->scid);
3659 	rsp.mtu     = cpu_to_le16(chan->imtu);
3660 	rsp.mps     = cpu_to_le16(chan->mps);
3661 	rsp.credits = cpu_to_le16(chan->rx_credits);
3662 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3663 
3664 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3665 		       &rsp);
3666 }
3667 
3668 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3669 {
3670 	struct l2cap_conn_rsp rsp;
3671 	struct l2cap_conn *conn = chan->conn;
3672 	u8 buf[128];
3673 	u8 rsp_code;
3674 
3675 	rsp.scid   = cpu_to_le16(chan->dcid);
3676 	rsp.dcid   = cpu_to_le16(chan->scid);
3677 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3678 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3679 
3680 	if (chan->hs_hcon)
3681 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3682 	else
3683 		rsp_code = L2CAP_CONN_RSP;
3684 
3685 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3686 
3687 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3688 
3689 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3690 		return;
3691 
3692 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3693 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3694 	chan->num_conf_req++;
3695 }
3696 
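/* Extract the RFC and extended window size options from a successful
 * Configuration Response and apply the resulting ERTM/streaming
 * parameters (timeouts, MPS, ack window) to @chan.
 */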
3697 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3698 {
3699 	int type, olen;
3700 	unsigned long val;
3701 	/* Use sane default values in case a misbehaving remote device
3702 	 * did not send an RFC or extended window size option.
3703 	 */
3704 	u16 txwin_ext = chan->ack_win;
3705 	struct l2cap_conf_rfc rfc = {
3706 		.mode = chan->mode,
3707 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3708 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3709 		.max_pdu_size = cpu_to_le16(chan->imtu),
3710 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3711 	};
3712 
3713 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3714 
3715 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3716 		return;
3717 
3718 	while (len >= L2CAP_CONF_OPT_SIZE) {
3719 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3720 
3721 		switch (type) {
3722 		case L2CAP_CONF_RFC:
3723 			if (olen == sizeof(rfc))
3724 				memcpy(&rfc, (void *)val, olen);
3725 			break;
3726 		case L2CAP_CONF_EWS:
3727 			txwin_ext = val;
3728 			break;
3729 		}
3730 	}
3731 
3732 	switch (rfc.mode) {
3733 	case L2CAP_MODE_ERTM:
3734 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3735 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3736 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3737 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3738 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3739 		else
3740 			chan->ack_win = min_t(u16, chan->ack_win,
3741 					      rfc.txwin_size);
3742 		break;
3743 	case L2CAP_MODE_STREAMING:
3744 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3745 	}
3746 }
3747 
3748 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3749 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3750 				    u8 *data)
3751 {
3752 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3753 
3754 	if (cmd_len < sizeof(*rej))
3755 		return -EPROTO;
3756 
3757 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3758 		return 0;
3759 
3760 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3761 	    cmd->ident == conn->info_ident) {
3762 		cancel_delayed_work(&conn->info_timer);
3763 
3764 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3765 		conn->info_ident = 0;
3766 
3767 		l2cap_conn_start(conn);
3768 	}
3769 
3770 	return 0;
3771 }
3772 
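/* Handle an incoming Connection Request (or Create Channel Request on
 * behalf of an AMP controller): look up a listening channel for the
 * PSM, verify link security, allocate the new channel and send the
 * Connection/Create Channel Response.
 */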
3773 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3774 					struct l2cap_cmd_hdr *cmd,
3775 					u8 *data, u8 rsp_code, u8 amp_id)
3776 {
3777 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3778 	struct l2cap_conn_rsp rsp;
3779 	struct l2cap_chan *chan = NULL, *pchan;
3780 	int result, status = L2CAP_CS_NO_INFO;
3781 
3782 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3783 	__le16 psm = req->psm;
3784 
3785 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3786 
3787 	/* Check if we have socket listening on psm */
3788 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3789 					 &conn->hcon->dst, ACL_LINK);
3790 	if (!pchan) {
3791 		result = L2CAP_CR_BAD_PSM;
3792 		goto sendresp;
3793 	}
3794 
3795 	mutex_lock(&conn->chan_lock);
3796 	l2cap_chan_lock(pchan);
3797 
3798 	/* Check if the ACL is secure enough (if not SDP) */
3799 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3800 	    !hci_conn_check_link_mode(conn->hcon)) {
3801 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3802 		result = L2CAP_CR_SEC_BLOCK;
3803 		goto response;
3804 	}
3805 
3806 	result = L2CAP_CR_NO_MEM;
3807 
3808 	/* Check if we already have channel with that dcid */
3809 	if (__l2cap_get_chan_by_dcid(conn, scid))
3810 		goto response;
3811 
3812 	chan = pchan->ops->new_connection(pchan);
3813 	if (!chan)
3814 		goto response;
3815 
3816 	/* For certain devices (e.g. an HID mouse), support for authentication,
3817 	 * pairing and bonding is optional. For such devices, in order to avoid
3818 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3819 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3820 	 */
3821 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3822 
3823 	bacpy(&chan->src, &conn->hcon->src);
3824 	bacpy(&chan->dst, &conn->hcon->dst);
3825 	chan->src_type = bdaddr_src_type(conn->hcon);
3826 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3827 	chan->psm  = psm;
3828 	chan->dcid = scid;
3829 	chan->local_amp_id = amp_id;
3830 
3831 	__l2cap_chan_add(conn, chan);
3832 
3833 	dcid = chan->scid;
3834 
3835 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3836 
3837 	chan->ident = cmd->ident;
3838 
3839 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3840 		if (l2cap_chan_check_security(chan, false)) {
3841 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3842 				l2cap_state_change(chan, BT_CONNECT2);
3843 				result = L2CAP_CR_PEND;
3844 				status = L2CAP_CS_AUTHOR_PEND;
3845 				chan->ops->defer(chan);
3846 			} else {
3847 				/* Force pending result for AMP controllers.
3848 				 * The connection will succeed after the
3849 				 * physical link is up.
3850 				 */
3851 				if (amp_id == AMP_ID_BREDR) {
3852 					l2cap_state_change(chan, BT_CONFIG);
3853 					result = L2CAP_CR_SUCCESS;
3854 				} else {
3855 					l2cap_state_change(chan, BT_CONNECT2);
3856 					result = L2CAP_CR_PEND;
3857 				}
3858 				status = L2CAP_CS_NO_INFO;
3859 			}
3860 		} else {
3861 			l2cap_state_change(chan, BT_CONNECT2);
3862 			result = L2CAP_CR_PEND;
3863 			status = L2CAP_CS_AUTHEN_PEND;
3864 		}
3865 	} else {
3866 		l2cap_state_change(chan, BT_CONNECT2);
3867 		result = L2CAP_CR_PEND;
3868 		status = L2CAP_CS_NO_INFO;
3869 	}
3870 
3871 response:
3872 	l2cap_chan_unlock(pchan);
3873 	mutex_unlock(&conn->chan_lock);
3874 	l2cap_chan_put(pchan);
3875 
3876 sendresp:
3877 	rsp.scid   = cpu_to_le16(scid);
3878 	rsp.dcid   = cpu_to_le16(dcid);
3879 	rsp.result = cpu_to_le16(result);
3880 	rsp.status = cpu_to_le16(status);
3881 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3882 
3883 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3884 		struct l2cap_info_req info;
3885 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3886 
3887 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3888 		conn->info_ident = l2cap_get_ident(conn);
3889 
3890 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3891 
3892 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3893 			       sizeof(info), &info);
3894 	}
3895 
3896 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3897 	    result == L2CAP_CR_SUCCESS) {
3898 		u8 buf[128];
3899 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3900 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3901 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3902 		chan->num_conf_req++;
3903 	}
3904 
3905 	return chan;
3906 }
3907 
3908 static int l2cap_connect_req(struct l2cap_conn *conn,
3909 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3910 {
3911 	struct hci_dev *hdev = conn->hcon->hdev;
3912 	struct hci_conn *hcon = conn->hcon;
3913 
3914 	if (cmd_len < sizeof(struct l2cap_conn_req))
3915 		return -EPROTO;
3916 
3917 	hci_dev_lock(hdev);
3918 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3919 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3920 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3921 	hci_dev_unlock(hdev);
3922 
3923 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3924 	return 0;
3925 }
3926 
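/* Handle a Connection Response or Create Channel Response for an
 * outgoing request: on success move to BT_CONFIG and start
 * configuration, on a pending result keep waiting, otherwise delete
 * the channel.
 */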
3927 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3928 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3929 				    u8 *data)
3930 {
3931 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3932 	u16 scid, dcid, result, status;
3933 	struct l2cap_chan *chan;
3934 	u8 req[128];
3935 	int err;
3936 
3937 	if (cmd_len < sizeof(*rsp))
3938 		return -EPROTO;
3939 
3940 	scid   = __le16_to_cpu(rsp->scid);
3941 	dcid   = __le16_to_cpu(rsp->dcid);
3942 	result = __le16_to_cpu(rsp->result);
3943 	status = __le16_to_cpu(rsp->status);
3944 
3945 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3946 	       dcid, scid, result, status);
3947 
3948 	mutex_lock(&conn->chan_lock);
3949 
3950 	if (scid) {
3951 		chan = __l2cap_get_chan_by_scid(conn, scid);
3952 		if (!chan) {
3953 			err = -EBADSLT;
3954 			goto unlock;
3955 		}
3956 	} else {
3957 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3958 		if (!chan) {
3959 			err = -EBADSLT;
3960 			goto unlock;
3961 		}
3962 	}
3963 
3964 	err = 0;
3965 
3966 	l2cap_chan_lock(chan);
3967 
3968 	switch (result) {
3969 	case L2CAP_CR_SUCCESS:
3970 		l2cap_state_change(chan, BT_CONFIG);
3971 		chan->ident = 0;
3972 		chan->dcid = dcid;
3973 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3974 
3975 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3976 			break;
3977 
3978 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3979 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
3980 		chan->num_conf_req++;
3981 		break;
3982 
3983 	case L2CAP_CR_PEND:
3984 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3985 		break;
3986 
3987 	default:
3988 		l2cap_chan_del(chan, ECONNREFUSED);
3989 		break;
3990 	}
3991 
3992 	l2cap_chan_unlock(chan);
3993 
3994 unlock:
3995 	mutex_unlock(&conn->chan_lock);
3996 
3997 	return err;
3998 }
3999 
4000 static inline void set_default_fcs(struct l2cap_chan *chan)
4001 {
4002 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4003 	 * sides request it.
4004 	 */
4005 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4006 		chan->fcs = L2CAP_FCS_NONE;
4007 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4008 		chan->fcs = L2CAP_FCS_CRC16;
4009 }
4010 
4011 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4012 				    u8 ident, u16 flags)
4013 {
4014 	struct l2cap_conn *conn = chan->conn;
4015 
4016 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4017 	       flags);
4018 
4019 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4020 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4021 
4022 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4023 		       l2cap_build_conf_rsp(chan, data,
4024 					    L2CAP_CONF_SUCCESS, flags), data);
4025 }
4026 
4027 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4028 				   u16 scid, u16 dcid)
4029 {
4030 	struct l2cap_cmd_rej_cid rej;
4031 
4032 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4033 	rej.scid = __cpu_to_le16(scid);
4034 	rej.dcid = __cpu_to_le16(dcid);
4035 
4036 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4037 }
4038 
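/* Handle a Configuration Request.  Option data is accumulated in
 * chan->conf_req across continuation packets; once the final fragment
 * arrives the options are parsed and a Configuration Response is sent.
 */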
4039 static inline int l2cap_config_req(struct l2cap_conn *conn,
4040 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4041 				   u8 *data)
4042 {
4043 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4044 	u16 dcid, flags;
4045 	u8 rsp[64];
4046 	struct l2cap_chan *chan;
4047 	int len, err = 0;
4048 
4049 	if (cmd_len < sizeof(*req))
4050 		return -EPROTO;
4051 
4052 	dcid  = __le16_to_cpu(req->dcid);
4053 	flags = __le16_to_cpu(req->flags);
4054 
4055 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4056 
4057 	chan = l2cap_get_chan_by_scid(conn, dcid);
4058 	if (!chan) {
4059 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4060 		return 0;
4061 	}
4062 
4063 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4064 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4065 				       chan->dcid);
4066 		goto unlock;
4067 	}
4068 
4069 	/* Reject if config buffer is too small. */
4070 	len = cmd_len - sizeof(*req);
4071 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4072 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4073 			       l2cap_build_conf_rsp(chan, rsp,
4074 			       L2CAP_CONF_REJECT, flags), rsp);
4075 		goto unlock;
4076 	}
4077 
4078 	/* Store config. */
4079 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4080 	chan->conf_len += len;
4081 
4082 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4083 		/* Incomplete config. Send empty response. */
4084 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4085 			       l2cap_build_conf_rsp(chan, rsp,
4086 			       L2CAP_CONF_SUCCESS, flags), rsp);
4087 		goto unlock;
4088 	}
4089 
4090 	/* Complete config. */
4091 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4092 	if (len < 0) {
4093 		l2cap_send_disconn_req(chan, ECONNRESET);
4094 		goto unlock;
4095 	}
4096 
4097 	chan->ident = cmd->ident;
4098 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4099 	chan->num_conf_rsp++;
4100 
4101 	/* Reset config buffer. */
4102 	chan->conf_len = 0;
4103 
4104 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4105 		goto unlock;
4106 
4107 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4108 		set_default_fcs(chan);
4109 
4110 		if (chan->mode == L2CAP_MODE_ERTM ||
4111 		    chan->mode == L2CAP_MODE_STREAMING)
4112 			err = l2cap_ertm_init(chan);
4113 
4114 		if (err < 0)
4115 			l2cap_send_disconn_req(chan, -err);
4116 		else
4117 			l2cap_chan_ready(chan);
4118 
4119 		goto unlock;
4120 	}
4121 
4122 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4123 		u8 buf[64];
4124 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4125 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4126 		chan->num_conf_req++;
4127 	}
4128 
4129 	/* Got Conf Rsp PENDING from remote side and assume we sent
4130 	 * Conf Rsp PENDING in the code above */
4131 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4132 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4133 
4134 		/* check compatibility */
4135 
4136 		/* Send rsp for BR/EDR channel */
4137 		if (!chan->hs_hcon)
4138 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4139 		else
4140 			chan->ident = cmd->ident;
4141 	}
4142 
4143 unlock:
4144 	l2cap_chan_unlock(chan);
4145 	return err;
4146 }
4147 
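/* Handle a Configuration Response.  On success the negotiated RFC
 * options are applied; on L2CAP_CONF_UNACCEPT a new Configuration
 * Request is built from the rejected options; any other result tears
 * the channel down.
 */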
4148 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4149 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4150 				   u8 *data)
4151 {
4152 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4153 	u16 scid, flags, result;
4154 	struct l2cap_chan *chan;
4155 	int len = cmd_len - sizeof(*rsp);
4156 	int err = 0;
4157 
4158 	if (cmd_len < sizeof(*rsp))
4159 		return -EPROTO;
4160 
4161 	scid   = __le16_to_cpu(rsp->scid);
4162 	flags  = __le16_to_cpu(rsp->flags);
4163 	result = __le16_to_cpu(rsp->result);
4164 
4165 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4166 	       result, len);
4167 
4168 	chan = l2cap_get_chan_by_scid(conn, scid);
4169 	if (!chan)
4170 		return 0;
4171 
4172 	switch (result) {
4173 	case L2CAP_CONF_SUCCESS:
4174 		l2cap_conf_rfc_get(chan, rsp->data, len);
4175 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4176 		break;
4177 
4178 	case L2CAP_CONF_PENDING:
4179 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4180 
4181 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4182 			char buf[64];
4183 
4184 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4185 						   buf, sizeof(buf), &result);
4186 			if (len < 0) {
4187 				l2cap_send_disconn_req(chan, ECONNRESET);
4188 				goto done;
4189 			}
4190 
4191 			if (!chan->hs_hcon) {
4192 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4193 							0);
4194 			} else {
4195 				if (l2cap_check_efs(chan)) {
4196 					amp_create_logical_link(chan);
4197 					chan->ident = cmd->ident;
4198 				}
4199 			}
4200 		}
4201 		goto done;
4202 
4203 	case L2CAP_CONF_UNACCEPT:
4204 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4205 			char req[64];
4206 
4207 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4208 				l2cap_send_disconn_req(chan, ECONNRESET);
4209 				goto done;
4210 			}
4211 
4212 			/* throw out any old stored conf requests */
4213 			result = L2CAP_CONF_SUCCESS;
4214 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4215 						   req, sizeof(req), &result);
4216 			if (len < 0) {
4217 				l2cap_send_disconn_req(chan, ECONNRESET);
4218 				goto done;
4219 			}
4220 
4221 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4222 				       L2CAP_CONF_REQ, len, req);
4223 			chan->num_conf_req++;
4224 			if (result != L2CAP_CONF_SUCCESS)
4225 				goto done;
4226 			break;
4227 		}
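		/* Too many unaccepted Configuration Responses: fall
		 * through to the default case and give up on the
		 * channel.
		 */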
4228 
4229 	default:
4230 		l2cap_chan_set_err(chan, ECONNRESET);
4231 
4232 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4233 		l2cap_send_disconn_req(chan, ECONNRESET);
4234 		goto done;
4235 	}
4236 
4237 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4238 		goto done;
4239 
4240 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4241 
4242 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4243 		set_default_fcs(chan);
4244 
4245 		if (chan->mode == L2CAP_MODE_ERTM ||
4246 		    chan->mode == L2CAP_MODE_STREAMING)
4247 			err = l2cap_ertm_init(chan);
4248 
4249 		if (err < 0)
4250 			l2cap_send_disconn_req(chan, -err);
4251 		else
4252 			l2cap_chan_ready(chan);
4253 	}
4254 
4255 done:
4256 	l2cap_chan_unlock(chan);
4257 	return err;
4258 }
4259 
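/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.
 */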
4260 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4261 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4262 				       u8 *data)
4263 {
4264 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4265 	struct l2cap_disconn_rsp rsp;
4266 	u16 dcid, scid;
4267 	struct l2cap_chan *chan;
4268 
4269 	if (cmd_len != sizeof(*req))
4270 		return -EPROTO;
4271 
4272 	scid = __le16_to_cpu(req->scid);
4273 	dcid = __le16_to_cpu(req->dcid);
4274 
4275 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4276 
4277 	mutex_lock(&conn->chan_lock);
4278 
4279 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4280 	if (!chan) {
4281 		mutex_unlock(&conn->chan_lock);
4282 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4283 		return 0;
4284 	}
4285 
4286 	l2cap_chan_lock(chan);
4287 
4288 	rsp.dcid = cpu_to_le16(chan->scid);
4289 	rsp.scid = cpu_to_le16(chan->dcid);
4290 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4291 
4292 	chan->ops->set_shutdown(chan);
4293 
4294 	l2cap_chan_hold(chan);
4295 	l2cap_chan_del(chan, ECONNRESET);
4296 
4297 	l2cap_chan_unlock(chan);
4298 
4299 	chan->ops->close(chan);
4300 	l2cap_chan_put(chan);
4301 
4302 	mutex_unlock(&conn->chan_lock);
4303 
4304 	return 0;
4305 }
4306 
4307 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4308 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4309 				       u8 *data)
4310 {
4311 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4312 	u16 dcid, scid;
4313 	struct l2cap_chan *chan;
4314 
4315 	if (cmd_len != sizeof(*rsp))
4316 		return -EPROTO;
4317 
4318 	scid = __le16_to_cpu(rsp->scid);
4319 	dcid = __le16_to_cpu(rsp->dcid);
4320 
4321 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4322 
4323 	mutex_lock(&conn->chan_lock);
4324 
4325 	chan = __l2cap_get_chan_by_scid(conn, scid);
4326 	if (!chan) {
4327 		mutex_unlock(&conn->chan_lock);
4328 		return 0;
4329 	}
4330 
4331 	l2cap_chan_lock(chan);
4332 
4333 	l2cap_chan_hold(chan);
4334 	l2cap_chan_del(chan, 0);
4335 
4336 	l2cap_chan_unlock(chan);
4337 
4338 	chan->ops->close(chan);
4339 	l2cap_chan_put(chan);
4340 
4341 	mutex_unlock(&conn->chan_lock);
4342 
4343 	return 0;
4344 }
4345 
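/* Answer an Information Request for the extended feature mask or the
 * supported fixed channels; any other type is reported as not
 * supported.
 */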
4346 static inline int l2cap_information_req(struct l2cap_conn *conn,
4347 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4348 					u8 *data)
4349 {
4350 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4351 	u16 type;
4352 
4353 	if (cmd_len != sizeof(*req))
4354 		return -EPROTO;
4355 
4356 	type = __le16_to_cpu(req->type);
4357 
4358 	BT_DBG("type 0x%4.4x", type);
4359 
4360 	if (type == L2CAP_IT_FEAT_MASK) {
4361 		u8 buf[8];
4362 		u32 feat_mask = l2cap_feat_mask;
4363 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4364 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4365 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4366 		if (!disable_ertm)
4367 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4368 				| L2CAP_FEAT_FCS;
4369 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4370 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4371 				| L2CAP_FEAT_EXT_WINDOW;
4372 
4373 		put_unaligned_le32(feat_mask, rsp->data);
4374 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4375 			       buf);
4376 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4377 		u8 buf[12];
4378 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4379 
4380 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4381 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4382 		rsp->data[0] = conn->local_fixed_chan;
4383 		memset(rsp->data + 1, 0, 7);
4384 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4385 			       buf);
4386 	} else {
4387 		struct l2cap_info_rsp rsp;
4388 		rsp.type   = cpu_to_le16(type);
4389 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4390 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4391 			       &rsp);
4392 	}
4393 
4394 	return 0;
4395 }
4396 
4397 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4398 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4399 					u8 *data)
4400 {
4401 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4402 	u16 type, result;
4403 
4404 	if (cmd_len < sizeof(*rsp))
4405 		return -EPROTO;
4406 
4407 	type   = __le16_to_cpu(rsp->type);
4408 	result = __le16_to_cpu(rsp->result);
4409 
4410 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4411 
4412 	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4413 	if (cmd->ident != conn->info_ident ||
4414 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4415 		return 0;
4416 
4417 	cancel_delayed_work(&conn->info_timer);
4418 
4419 	if (result != L2CAP_IR_SUCCESS) {
4420 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4421 		conn->info_ident = 0;
4422 
4423 		l2cap_conn_start(conn);
4424 
4425 		return 0;
4426 	}
4427 
4428 	switch (type) {
4429 	case L2CAP_IT_FEAT_MASK:
4430 		conn->feat_mask = get_unaligned_le32(rsp->data);
4431 
4432 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4433 			struct l2cap_info_req req;
4434 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4435 
4436 			conn->info_ident = l2cap_get_ident(conn);
4437 
4438 			l2cap_send_cmd(conn, conn->info_ident,
4439 				       L2CAP_INFO_REQ, sizeof(req), &req);
4440 		} else {
4441 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4442 			conn->info_ident = 0;
4443 
4444 			l2cap_conn_start(conn);
4445 		}
4446 		break;
4447 
4448 	case L2CAP_IT_FIXED_CHAN:
4449 		conn->remote_fixed_chan = rsp->data[0];
4450 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4451 		conn->info_ident = 0;
4452 
4453 		l2cap_conn_start(conn);
4454 		break;
4455 	}
4456 
4457 	return 0;
4458 }
4459 
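/* Handle an A2MP Create Channel Request.  Controller id 0 falls back
 * to a plain BR/EDR connection; otherwise the AMP controller id is
 * validated before the channel is created on the high-speed link.
 */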
4460 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4461 				    struct l2cap_cmd_hdr *cmd,
4462 				    u16 cmd_len, void *data)
4463 {
4464 	struct l2cap_create_chan_req *req = data;
4465 	struct l2cap_create_chan_rsp rsp;
4466 	struct l2cap_chan *chan;
4467 	struct hci_dev *hdev;
4468 	u16 psm, scid;
4469 
4470 	if (cmd_len != sizeof(*req))
4471 		return -EPROTO;
4472 
4473 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4474 		return -EINVAL;
4475 
4476 	psm = le16_to_cpu(req->psm);
4477 	scid = le16_to_cpu(req->scid);
4478 
4479 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4480 
4481 	/* For controller id 0 make BR/EDR connection */
4482 	if (req->amp_id == AMP_ID_BREDR) {
4483 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4484 			      req->amp_id);
4485 		return 0;
4486 	}
4487 
4488 	/* Validate AMP controller id */
4489 	hdev = hci_dev_get(req->amp_id);
4490 	if (!hdev)
4491 		goto error;
4492 
4493 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4494 		hci_dev_put(hdev);
4495 		goto error;
4496 	}
4497 
4498 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4499 			     req->amp_id);
4500 	if (chan) {
4501 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4502 		struct hci_conn *hs_hcon;
4503 
4504 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4505 						  &conn->hcon->dst);
4506 		if (!hs_hcon) {
4507 			hci_dev_put(hdev);
4508 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4509 					       chan->dcid);
4510 			return 0;
4511 		}
4512 
4513 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4514 
4515 		mgr->bredr_chan = chan;
4516 		chan->hs_hcon = hs_hcon;
4517 		chan->fcs = L2CAP_FCS_NONE;
4518 		conn->mtu = hdev->block_mtu;
4519 	}
4520 
4521 	hci_dev_put(hdev);
4522 
4523 	return 0;
4524 
4525 error:
4526 	rsp.dcid = 0;
4527 	rsp.scid = cpu_to_le16(scid);
4528 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4529 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4530 
4531 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4532 		       sizeof(rsp), &rsp);
4533 
4534 	return 0;
4535 }
4536 
4537 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4538 {
4539 	struct l2cap_move_chan_req req;
4540 	u8 ident;
4541 
4542 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4543 
4544 	ident = l2cap_get_ident(chan->conn);
4545 	chan->ident = ident;
4546 
4547 	req.icid = cpu_to_le16(chan->scid);
4548 	req.dest_amp_id = dest_amp_id;
4549 
4550 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4551 		       &req);
4552 
4553 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4554 }
4555 
4556 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4557 {
4558 	struct l2cap_move_chan_rsp rsp;
4559 
4560 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4561 
4562 	rsp.icid = cpu_to_le16(chan->dcid);
4563 	rsp.result = cpu_to_le16(result);
4564 
4565 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4566 		       sizeof(rsp), &rsp);
4567 }
4568 
4569 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4570 {
4571 	struct l2cap_move_chan_cfm cfm;
4572 
4573 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4574 
4575 	chan->ident = l2cap_get_ident(chan->conn);
4576 
4577 	cfm.icid = cpu_to_le16(chan->scid);
4578 	cfm.result = cpu_to_le16(result);
4579 
4580 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4581 		       sizeof(cfm), &cfm);
4582 
4583 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4584 }
4585 
4586 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4587 {
4588 	struct l2cap_move_chan_cfm cfm;
4589 
4590 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4591 
4592 	cfm.icid = cpu_to_le16(icid);
4593 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4594 
4595 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4596 		       sizeof(cfm), &cfm);
4597 }
4598 
4599 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4600 					 u16 icid)
4601 {
4602 	struct l2cap_move_chan_cfm_rsp rsp;
4603 
4604 	BT_DBG("icid 0x%4.4x", icid);
4605 
4606 	rsp.icid = cpu_to_le16(icid);
4607 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4608 }
4609 
4610 static void __release_logical_link(struct l2cap_chan *chan)
4611 {
4612 	chan->hs_hchan = NULL;
4613 	chan->hs_hcon = NULL;
4614 
4615 	/* Placeholder - release the logical link */
4616 }
4617 
4618 static void l2cap_logical_fail(struct l2cap_chan *chan)
4619 {
4620 	/* Logical link setup failed */
4621 	if (chan->state != BT_CONNECTED) {
4622 		/* Create channel failure, disconnect */
4623 		l2cap_send_disconn_req(chan, ECONNRESET);
4624 		return;
4625 	}
4626 
4627 	switch (chan->move_role) {
4628 	case L2CAP_MOVE_ROLE_RESPONDER:
4629 		l2cap_move_done(chan);
4630 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4631 		break;
4632 	case L2CAP_MOVE_ROLE_INITIATOR:
4633 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4634 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4635 			/* Remote has only sent pending or
4636 			 * success responses, clean up
4637 			 */
4638 			l2cap_move_done(chan);
4639 		}
4640 
4641 		/* Other amp move states imply that the move
4642 		 * has already aborted
4643 		 */
4644 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4645 		break;
4646 	}
4647 }
4648 
4649 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4650 					struct hci_chan *hchan)
4651 {
4652 	struct l2cap_conf_rsp rsp;
4653 
4654 	chan->hs_hchan = hchan;
4655 	chan->hs_hcon->l2cap_data = chan->conn;
4656 
4657 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4658 
4659 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4660 		int err;
4661 
4662 		set_default_fcs(chan);
4663 
4664 		err = l2cap_ertm_init(chan);
4665 		if (err < 0)
4666 			l2cap_send_disconn_req(chan, -err);
4667 		else
4668 			l2cap_chan_ready(chan);
4669 	}
4670 }
4671 
4672 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4673 				      struct hci_chan *hchan)
4674 {
4675 	chan->hs_hcon = hchan->conn;
4676 	chan->hs_hcon->l2cap_data = chan->conn;
4677 
4678 	BT_DBG("move_state %d", chan->move_state);
4679 
4680 	switch (chan->move_state) {
4681 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4682 		/* Move confirm will be sent after a success
4683 		 * response is received
4684 		 */
4685 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4686 		break;
4687 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4688 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4689 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4690 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4691 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4692 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4693 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4694 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4695 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4696 		}
4697 		break;
4698 	default:
4699 		/* Move was not in expected state, free the channel */
4700 		__release_logical_link(chan);
4701 
4702 		chan->move_state = L2CAP_MOVE_STABLE;
4703 	}
4704 }
4705 
4706 /* Call with chan locked */
4707 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4708 		       u8 status)
4709 {
4710 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4711 
4712 	if (status) {
4713 		l2cap_logical_fail(chan);
4714 		__release_logical_link(chan);
4715 		return;
4716 	}
4717 
4718 	if (chan->state != BT_CONNECTED) {
4719 		/* Ignore logical link if channel is on BR/EDR */
4720 		if (chan->local_amp_id != AMP_ID_BREDR)
4721 			l2cap_logical_finish_create(chan, hchan);
4722 	} else {
4723 		l2cap_logical_finish_move(chan, hchan);
4724 	}
4725 }
4726 
4727 void l2cap_move_start(struct l2cap_chan *chan)
4728 {
4729 	BT_DBG("chan %p", chan);
4730 
4731 	if (chan->local_amp_id == AMP_ID_BREDR) {
4732 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4733 			return;
4734 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4735 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4736 		/* Placeholder - start physical link setup */
4737 	} else {
4738 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4739 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4740 		chan->move_id = 0;
4741 		l2cap_move_setup(chan);
4742 		l2cap_send_move_chan_req(chan, 0);
4743 	}
4744 }
4745 
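/* Finish channel creation once the AMP physical link attempt has
 * completed: either continue on the AMP controller or fall back to
 * BR/EDR, and send the pending Create Channel Response for incoming
 * channels.
 */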
4746 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4747 			    u8 local_amp_id, u8 remote_amp_id)
4748 {
4749 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4750 	       local_amp_id, remote_amp_id);
4751 
4752 	chan->fcs = L2CAP_FCS_NONE;
4753 
4754 	/* Outgoing channel on AMP */
4755 	if (chan->state == BT_CONNECT) {
4756 		if (result == L2CAP_CR_SUCCESS) {
4757 			chan->local_amp_id = local_amp_id;
4758 			l2cap_send_create_chan_req(chan, remote_amp_id);
4759 		} else {
4760 			/* Revert to BR/EDR connect */
4761 			l2cap_send_conn_req(chan);
4762 		}
4763 
4764 		return;
4765 	}
4766 
4767 	/* Incoming channel on AMP */
4768 	if (__l2cap_no_conn_pending(chan)) {
4769 		struct l2cap_conn_rsp rsp;
4770 		char buf[128];
4771 		rsp.scid = cpu_to_le16(chan->dcid);
4772 		rsp.dcid = cpu_to_le16(chan->scid);
4773 
4774 		if (result == L2CAP_CR_SUCCESS) {
4775 			/* Send successful response */
4776 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4777 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4778 		} else {
4779 			/* Send negative response */
4780 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4781 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4782 		}
4783 
4784 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4785 			       sizeof(rsp), &rsp);
4786 
4787 		if (result == L2CAP_CR_SUCCESS) {
4788 			l2cap_state_change(chan, BT_CONFIG);
4789 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4790 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4791 				       L2CAP_CONF_REQ,
4792 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4793 			chan->num_conf_req++;
4794 		}
4795 	}
4796 }
4797 
4798 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4799 				   u8 remote_amp_id)
4800 {
4801 	l2cap_move_setup(chan);
4802 	chan->move_id = local_amp_id;
4803 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4804 
4805 	l2cap_send_move_chan_req(chan, remote_amp_id);
4806 }
4807 
4808 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4809 {
4810 	struct hci_chan *hchan = NULL;
4811 
4812 	/* Placeholder - get hci_chan for logical link */
4813 
4814 	if (hchan) {
4815 		if (hchan->state == BT_CONNECTED) {
4816 			/* Logical link is ready to go */
4817 			chan->hs_hcon = hchan->conn;
4818 			chan->hs_hcon->l2cap_data = chan->conn;
4819 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4820 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4821 
4822 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4823 		} else {
4824 			/* Wait for logical link to be ready */
4825 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4826 		}
4827 	} else {
4828 		/* Logical link not available */
4829 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4830 	}
4831 }
4832 
4833 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4834 {
4835 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4836 		u8 rsp_result;
4837 		if (result == -EINVAL)
4838 			rsp_result = L2CAP_MR_BAD_ID;
4839 		else
4840 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4841 
4842 		l2cap_send_move_chan_rsp(chan, rsp_result);
4843 	}
4844 
4845 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4846 	chan->move_state = L2CAP_MOVE_STABLE;
4847 
4848 	/* Restart data transmission */
4849 	l2cap_ertm_send(chan);
4850 }
4851 
4852 /* Invoke with locked chan */
4853 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4854 {
4855 	u8 local_amp_id = chan->local_amp_id;
4856 	u8 remote_amp_id = chan->remote_amp_id;
4857 
4858 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4859 	       chan, result, local_amp_id, remote_amp_id);
4860 
4861 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4862 		l2cap_chan_unlock(chan);
4863 		return;
4864 	}
4865 
4866 	if (chan->state != BT_CONNECTED) {
4867 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4868 	} else if (result != L2CAP_MR_SUCCESS) {
4869 		l2cap_do_move_cancel(chan, result);
4870 	} else {
4871 		switch (chan->move_role) {
4872 		case L2CAP_MOVE_ROLE_INITIATOR:
4873 			l2cap_do_move_initiate(chan, local_amp_id,
4874 					       remote_amp_id);
4875 			break;
4876 		case L2CAP_MOVE_ROLE_RESPONDER:
4877 			l2cap_do_move_respond(chan, result);
4878 			break;
4879 		default:
4880 			l2cap_do_move_cancel(chan, result);
4881 			break;
4882 		}
4883 	}
4884 }
4885 
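/* Handle a Move Channel Request as the responder.  The move is only
 * allowed for dynamic ERTM/streaming channels; the destination
 * controller id is validated and move collisions are resolved by
 * comparing BD_ADDRs before the Move Channel Response is sent.
 */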
4886 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4887 					 struct l2cap_cmd_hdr *cmd,
4888 					 u16 cmd_len, void *data)
4889 {
4890 	struct l2cap_move_chan_req *req = data;
4891 	struct l2cap_move_chan_rsp rsp;
4892 	struct l2cap_chan *chan;
4893 	u16 icid = 0;
4894 	u16 result = L2CAP_MR_NOT_ALLOWED;
4895 
4896 	if (cmd_len != sizeof(*req))
4897 		return -EPROTO;
4898 
4899 	icid = le16_to_cpu(req->icid);
4900 
4901 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4902 
4903 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4904 		return -EINVAL;
4905 
4906 	chan = l2cap_get_chan_by_dcid(conn, icid);
4907 	if (!chan) {
4908 		rsp.icid = cpu_to_le16(icid);
4909 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4910 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4911 			       sizeof(rsp), &rsp);
4912 		return 0;
4913 	}
4914 
4915 	chan->ident = cmd->ident;
4916 
4917 	if (chan->scid < L2CAP_CID_DYN_START ||
4918 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4919 	    (chan->mode != L2CAP_MODE_ERTM &&
4920 	     chan->mode != L2CAP_MODE_STREAMING)) {
4921 		result = L2CAP_MR_NOT_ALLOWED;
4922 		goto send_move_response;
4923 	}
4924 
4925 	if (chan->local_amp_id == req->dest_amp_id) {
4926 		result = L2CAP_MR_SAME_ID;
4927 		goto send_move_response;
4928 	}
4929 
4930 	if (req->dest_amp_id != AMP_ID_BREDR) {
4931 		struct hci_dev *hdev;
4932 		hdev = hci_dev_get(req->dest_amp_id);
4933 		if (!hdev || hdev->dev_type != HCI_AMP ||
4934 		    !test_bit(HCI_UP, &hdev->flags)) {
4935 			if (hdev)
4936 				hci_dev_put(hdev);
4937 
4938 			result = L2CAP_MR_BAD_ID;
4939 			goto send_move_response;
4940 		}
4941 		hci_dev_put(hdev);
4942 	}
4943 
4944 	/* Detect a move collision.  Only send a collision response
4945 	 * if this side has "lost", otherwise proceed with the move.
4946 	 * The winner has the larger bd_addr.
4947 	 */
4948 	if ((__chan_is_moving(chan) ||
4949 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4950 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4951 		result = L2CAP_MR_COLLISION;
4952 		goto send_move_response;
4953 	}
4954 
4955 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4956 	l2cap_move_setup(chan);
4957 	chan->move_id = req->dest_amp_id;
4958 	icid = chan->dcid;
4959 
4960 	if (req->dest_amp_id == AMP_ID_BREDR) {
4961 		/* Moving to BR/EDR */
4962 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4963 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4964 			result = L2CAP_MR_PEND;
4965 		} else {
4966 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4967 			result = L2CAP_MR_SUCCESS;
4968 		}
4969 	} else {
4970 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4971 		/* Placeholder - uncomment when amp functions are available */
4972 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4973 		result = L2CAP_MR_PEND;
4974 	}
4975 
4976 send_move_response:
4977 	l2cap_send_move_chan_rsp(chan, result);
4978 
4979 	l2cap_chan_unlock(chan);
4980 
4981 	return 0;
4982 }
4983 
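/* Continue an initiator-side channel move after a successful or
 * pending Move Channel Response, advancing move_state and sending the
 * Move Channel Confirmation once the logical link is ready.
 */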
4984 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4985 {
4986 	struct l2cap_chan *chan;
4987 	struct hci_chan *hchan = NULL;
4988 
4989 	chan = l2cap_get_chan_by_scid(conn, icid);
4990 	if (!chan) {
4991 		l2cap_send_move_chan_cfm_icid(conn, icid);
4992 		return;
4993 	}
4994 
4995 	__clear_chan_timer(chan);
4996 	if (result == L2CAP_MR_PEND)
4997 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4998 
4999 	switch (chan->move_state) {
5000 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5001 		/* Move confirm will be sent when logical link
5002 		 * is complete.
5003 		 */
5004 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5005 		break;
5006 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5007 		if (result == L2CAP_MR_PEND) {
5008 			break;
5009 		} else if (test_bit(CONN_LOCAL_BUSY,
5010 				    &chan->conn_state)) {
5011 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5012 		} else {
5013 			/* Logical link is up or moving to BR/EDR,
5014 			 * proceed with move
5015 			 */
5016 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5017 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5018 		}
5019 		break;
5020 	case L2CAP_MOVE_WAIT_RSP:
5021 		/* Moving to AMP */
5022 		if (result == L2CAP_MR_SUCCESS) {
5023 			/* Remote is ready, send confirm immediately
5024 			 * after logical link is ready
5025 			 */
5026 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5027 		} else {
5028 			/* Both logical link and move success
5029 			 * are required to confirm
5030 			 */
5031 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5032 		}
5033 
5034 		/* Placeholder - get hci_chan for logical link */
5035 		if (!hchan) {
5036 			/* Logical link not available */
5037 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5038 			break;
5039 		}
5040 
5041 		/* If the logical link is not yet connected, do not
5042 		 * send confirmation.
5043 		 */
5044 		if (hchan->state != BT_CONNECTED)
5045 			break;
5046 
5047 		/* Logical link is already ready to go */
5048 
5049 		chan->hs_hcon = hchan->conn;
5050 		chan->hs_hcon->l2cap_data = chan->conn;
5051 
5052 		if (result == L2CAP_MR_SUCCESS) {
5053 			/* Can confirm now */
5054 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5055 		} else {
5056 			/* Now only need move success
5057 			 * to confirm
5058 			 */
5059 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5060 		}
5061 
5062 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5063 		break;
5064 	default:
5065 		/* Any other amp move state means the move failed. */
5066 		chan->move_id = chan->local_amp_id;
5067 		l2cap_move_done(chan);
5068 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5069 	}
5070 
5071 	l2cap_chan_unlock(chan);
5072 }
5073 
5074 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5075 			    u16 result)
5076 {
5077 	struct l2cap_chan *chan;
5078 
5079 	chan = l2cap_get_chan_by_ident(conn, ident);
5080 	if (!chan) {
5081 		/* Could not locate channel, icid is best guess */
5082 		l2cap_send_move_chan_cfm_icid(conn, icid);
5083 		return;
5084 	}
5085 
5086 	__clear_chan_timer(chan);
5087 
5088 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5089 		if (result == L2CAP_MR_COLLISION) {
5090 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5091 		} else {
5092 			/* Cleanup - cancel move */
5093 			chan->move_id = chan->local_amp_id;
5094 			l2cap_move_done(chan);
5095 		}
5096 	}
5097 
5098 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5099 
5100 	l2cap_chan_unlock(chan);
5101 }
5102 
5103 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5104 				  struct l2cap_cmd_hdr *cmd,
5105 				  u16 cmd_len, void *data)
5106 {
5107 	struct l2cap_move_chan_rsp *rsp = data;
5108 	u16 icid, result;
5109 
5110 	if (cmd_len != sizeof(*rsp))
5111 		return -EPROTO;
5112 
5113 	icid = le16_to_cpu(rsp->icid);
5114 	result = le16_to_cpu(rsp->result);
5115 
5116 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5117 
5118 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5119 		l2cap_move_continue(conn, icid, result);
5120 	else
5121 		l2cap_move_fail(conn, cmd->ident, icid, result);
5122 
5123 	return 0;
5124 }
5125 
5126 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5127 				      struct l2cap_cmd_hdr *cmd,
5128 				      u16 cmd_len, void *data)
5129 {
5130 	struct l2cap_move_chan_cfm *cfm = data;
5131 	struct l2cap_chan *chan;
5132 	u16 icid, result;
5133 
5134 	if (cmd_len != sizeof(*cfm))
5135 		return -EPROTO;
5136 
5137 	icid = le16_to_cpu(cfm->icid);
5138 	result = le16_to_cpu(cfm->result);
5139 
5140 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5141 
5142 	chan = l2cap_get_chan_by_dcid(conn, icid);
5143 	if (!chan) {
5144 		/* Spec requires a response even if the icid was not found */
5145 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5146 		return 0;
5147 	}
5148 
5149 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5150 		if (result == L2CAP_MC_CONFIRMED) {
5151 			chan->local_amp_id = chan->move_id;
5152 			if (chan->local_amp_id == AMP_ID_BREDR)
5153 				__release_logical_link(chan);
5154 		} else {
5155 			chan->move_id = chan->local_amp_id;
5156 		}
5157 
5158 		l2cap_move_done(chan);
5159 	}
5160 
5161 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5162 
5163 	l2cap_chan_unlock(chan);
5164 
5165 	return 0;
5166 }
5167 
5168 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5169 						 struct l2cap_cmd_hdr *cmd,
5170 						 u16 cmd_len, void *data)
5171 {
5172 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5173 	struct l2cap_chan *chan;
5174 	u16 icid;
5175 
5176 	if (cmd_len != sizeof(*rsp))
5177 		return -EPROTO;
5178 
5179 	icid = le16_to_cpu(rsp->icid);
5180 
5181 	BT_DBG("icid 0x%4.4x", icid);
5182 
5183 	chan = l2cap_get_chan_by_scid(conn, icid);
5184 	if (!chan)
5185 		return 0;
5186 
5187 	__clear_chan_timer(chan);
5188 
5189 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5190 		chan->local_amp_id = chan->move_id;
5191 
5192 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5193 			__release_logical_link(chan);
5194 
5195 		l2cap_move_done(chan);
5196 	}
5197 
5198 	l2cap_chan_unlock(chan);
5199 
5200 	return 0;
5201 }
5202 
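/* Handle an LE Connection Parameter Update Request.  Only valid on
 * the master side; the parameters are validated, a response is sent
 * and accepted values are pushed down to the controller.
 */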
5203 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5204 					      struct l2cap_cmd_hdr *cmd,
5205 					      u16 cmd_len, u8 *data)
5206 {
5207 	struct hci_conn *hcon = conn->hcon;
5208 	struct l2cap_conn_param_update_req *req;
5209 	struct l2cap_conn_param_update_rsp rsp;
5210 	u16 min, max, latency, to_multiplier;
5211 	int err;
5212 
5213 	if (hcon->role != HCI_ROLE_MASTER)
5214 		return -EINVAL;
5215 
5216 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5217 		return -EPROTO;
5218 
5219 	req = (struct l2cap_conn_param_update_req *) data;
5220 	min		= __le16_to_cpu(req->min);
5221 	max		= __le16_to_cpu(req->max);
5222 	latency		= __le16_to_cpu(req->latency);
5223 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5224 
5225 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5226 	       min, max, latency, to_multiplier);
5227 
5228 	memset(&rsp, 0, sizeof(rsp));
5229 
5230 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5231 	if (err)
5232 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5233 	else
5234 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5235 
5236 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5237 		       sizeof(rsp), &rsp);
5238 
5239 	if (!err) {
5240 		u8 store_hint;
5241 
5242 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5243 						to_multiplier);
5244 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5245 				    store_hint, min, max, latency,
5246 				    to_multiplier);
5247 
5248 	}
5249 
5250 	return 0;
5251 }
5252 
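/* Handle an LE credit based Connection Response for a pending
 * request: on success record the remote MTU/MPS/credits and mark the
 * channel ready; on an authentication/encryption error raise the
 * security level and retry via SMP; otherwise delete the channel.
 */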
5253 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5254 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5255 				u8 *data)
5256 {
5257 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5258 	struct hci_conn *hcon = conn->hcon;
5259 	u16 dcid, mtu, mps, credits, result;
5260 	struct l2cap_chan *chan;
5261 	int err, sec_level;
5262 
5263 	if (cmd_len < sizeof(*rsp))
5264 		return -EPROTO;
5265 
5266 	dcid    = __le16_to_cpu(rsp->dcid);
5267 	mtu     = __le16_to_cpu(rsp->mtu);
5268 	mps     = __le16_to_cpu(rsp->mps);
5269 	credits = __le16_to_cpu(rsp->credits);
5270 	result  = __le16_to_cpu(rsp->result);
5271 
5272 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5273 					   dcid < L2CAP_CID_DYN_START ||
5274 					   dcid > L2CAP_CID_LE_DYN_END))
5275 		return -EPROTO;
5276 
5277 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5278 	       dcid, mtu, mps, credits, result);
5279 
5280 	mutex_lock(&conn->chan_lock);
5281 
5282 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5283 	if (!chan) {
5284 		err = -EBADSLT;
5285 		goto unlock;
5286 	}
5287 
5288 	err = 0;
5289 
5290 	l2cap_chan_lock(chan);
5291 
5292 	switch (result) {
5293 	case L2CAP_CR_SUCCESS:
5294 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5295 			err = -EBADSLT;
5296 			break;
5297 		}
5298 
5299 		chan->ident = 0;
5300 		chan->dcid = dcid;
5301 		chan->omtu = mtu;
5302 		chan->remote_mps = mps;
5303 		chan->tx_credits = credits;
5304 		l2cap_chan_ready(chan);
5305 		break;
5306 
5307 	case L2CAP_CR_AUTHENTICATION:
5308 	case L2CAP_CR_ENCRYPTION:
5309 		/* If we already have MITM protection we can't do
5310 		 * anything.
5311 		 */
5312 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5313 			l2cap_chan_del(chan, ECONNREFUSED);
5314 			break;
5315 		}
5316 
5317 		sec_level = hcon->sec_level + 1;
5318 		if (chan->sec_level < sec_level)
5319 			chan->sec_level = sec_level;
5320 
5321 		/* We'll need to send a new Connect Request */
5322 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5323 
5324 		smp_conn_security(hcon, chan->sec_level);
5325 		break;
5326 
5327 	default:
5328 		l2cap_chan_del(chan, ECONNREFUSED);
5329 		break;
5330 	}
5331 
5332 	l2cap_chan_unlock(chan);
5333 
5334 unlock:
5335 	mutex_unlock(&conn->chan_lock);
5336 
5337 	return err;
5338 }
5339 
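/* Dispatch a single BR/EDR signaling command to its handler. */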
5340 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5341 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5342 				      u8 *data)
5343 {
5344 	int err = 0;
5345 
5346 	switch (cmd->code) {
5347 	case L2CAP_COMMAND_REJ:
5348 		l2cap_command_rej(conn, cmd, cmd_len, data);
5349 		break;
5350 
5351 	case L2CAP_CONN_REQ:
5352 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5353 		break;
5354 
5355 	case L2CAP_CONN_RSP:
5356 	case L2CAP_CREATE_CHAN_RSP:
5357 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5358 		break;
5359 
5360 	case L2CAP_CONF_REQ:
5361 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5362 		break;
5363 
5364 	case L2CAP_CONF_RSP:
5365 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5366 		break;
5367 
5368 	case L2CAP_DISCONN_REQ:
5369 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5370 		break;
5371 
5372 	case L2CAP_DISCONN_RSP:
5373 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5374 		break;
5375 
5376 	case L2CAP_ECHO_REQ:
5377 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5378 		break;
5379 
5380 	case L2CAP_ECHO_RSP:
5381 		break;
5382 
5383 	case L2CAP_INFO_REQ:
5384 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5385 		break;
5386 
5387 	case L2CAP_INFO_RSP:
5388 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5389 		break;
5390 
5391 	case L2CAP_CREATE_CHAN_REQ:
5392 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5393 		break;
5394 
5395 	case L2CAP_MOVE_CHAN_REQ:
5396 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5397 		break;
5398 
5399 	case L2CAP_MOVE_CHAN_RSP:
5400 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5401 		break;
5402 
5403 	case L2CAP_MOVE_CHAN_CFM:
5404 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5405 		break;
5406 
5407 	case L2CAP_MOVE_CHAN_CFM_RSP:
5408 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5409 		break;
5410 
5411 	default:
5412 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5413 		err = -EINVAL;
5414 		break;
5415 	}
5416 
5417 	return err;
5418 }
5419 
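/* Handle an LE credit based Connection Request: validate the MTU, MPS
 * and source CID, allocate a channel on the listening PSM and reply
 * with an LE Connection Response (or defer it when FLAG_DEFER_SETUP
 * is set).
 */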
5420 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5421 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5422 				u8 *data)
5423 {
5424 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5425 	struct l2cap_le_conn_rsp rsp;
5426 	struct l2cap_chan *chan, *pchan;
5427 	u16 dcid, scid, credits, mtu, mps;
5428 	__le16 psm;
5429 	u8 result;
5430 
5431 	if (cmd_len != sizeof(*req))
5432 		return -EPROTO;
5433 
5434 	scid = __le16_to_cpu(req->scid);
5435 	mtu  = __le16_to_cpu(req->mtu);
5436 	mps  = __le16_to_cpu(req->mps);
5437 	psm  = req->psm;
5438 	dcid = 0;
5439 	credits = 0;
5440 
5441 	if (mtu < 23 || mps < 23)
5442 		return -EPROTO;
5443 
5444 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5445 	       scid, mtu, mps);
5446 
5447 	/* Check if we have socket listening on psm */
5448 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5449 					 &conn->hcon->dst, LE_LINK);
5450 	if (!pchan) {
5451 		result = L2CAP_CR_BAD_PSM;
5452 		chan = NULL;
5453 		goto response;
5454 	}
5455 
5456 	mutex_lock(&conn->chan_lock);
5457 	l2cap_chan_lock(pchan);
5458 
5459 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5460 				     SMP_ALLOW_STK)) {
5461 		result = L2CAP_CR_AUTHENTICATION;
5462 		chan = NULL;
5463 		goto response_unlock;
5464 	}
5465 
5466 	/* Check for valid dynamic CID range */
5467 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5468 		result = L2CAP_CR_INVALID_SCID;
5469 		chan = NULL;
5470 		goto response_unlock;
5471 	}
5472 
5473 	/* Check if we already have channel with that dcid */
5474 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5475 		result = L2CAP_CR_SCID_IN_USE;
5476 		chan = NULL;
5477 		goto response_unlock;
5478 	}
5479 
5480 	chan = pchan->ops->new_connection(pchan);
5481 	if (!chan) {
5482 		result = L2CAP_CR_NO_MEM;
5483 		goto response_unlock;
5484 	}
5485 
5486 	l2cap_le_flowctl_init(chan);
5487 
5488 	bacpy(&chan->src, &conn->hcon->src);
5489 	bacpy(&chan->dst, &conn->hcon->dst);
5490 	chan->src_type = bdaddr_src_type(conn->hcon);
5491 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5492 	chan->psm  = psm;
5493 	chan->dcid = scid;
5494 	chan->omtu = mtu;
5495 	chan->remote_mps = mps;
5496 	chan->tx_credits = __le16_to_cpu(req->credits);
5497 
5498 	__l2cap_chan_add(conn, chan);
5499 	dcid = chan->scid;
5500 	credits = chan->rx_credits;
5501 
5502 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5503 
5504 	chan->ident = cmd->ident;
5505 
5506 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5507 		l2cap_state_change(chan, BT_CONNECT2);
5508 		/* The following result value is actually not defined
5509 		 * for LE CoC but we use it to let the function know
5510 		 * that it should bail out after doing its cleanup
5511 		 * instead of sending a response.
5512 		 */
5513 		result = L2CAP_CR_PEND;
5514 		chan->ops->defer(chan);
5515 	} else {
5516 		l2cap_chan_ready(chan);
5517 		result = L2CAP_CR_SUCCESS;
5518 	}
5519 
5520 response_unlock:
5521 	l2cap_chan_unlock(pchan);
5522 	mutex_unlock(&conn->chan_lock);
5523 	l2cap_chan_put(pchan);
5524 
5525 	if (result == L2CAP_CR_PEND)
5526 		return 0;
5527 
5528 response:
5529 	if (chan) {
5530 		rsp.mtu = cpu_to_le16(chan->imtu);
5531 		rsp.mps = cpu_to_le16(chan->mps);
5532 	} else {
5533 		rsp.mtu = 0;
5534 		rsp.mps = 0;
5535 	}
5536 
5537 	rsp.dcid    = cpu_to_le16(dcid);
5538 	rsp.credits = cpu_to_le16(credits);
5539 	rsp.result  = cpu_to_le16(result);
5540 
5541 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5542 
5543 	return 0;
5544 }
5545 
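/* LE flow control credits (L2CAP_LE_CREDITS).
 *
 * Each credit allows us to send one PDU of up to the remote MPS octets.
 * The running total per channel must never exceed 65535
 * (LE_FLOWCTL_MAX_CREDITS); for example, if tx_credits is already 10,
 * a credit packet granting more than 65525 credits would overflow the
 * counter, so the channel is disconnected below instead.
 */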
5546 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5547 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5548 				   u8 *data)
5549 {
5550 	struct l2cap_le_credits *pkt;
5551 	struct l2cap_chan *chan;
5552 	u16 cid, credits, max_credits;
5553 
5554 	if (cmd_len != sizeof(*pkt))
5555 		return -EPROTO;
5556 
5557 	pkt = (struct l2cap_le_credits *) data;
5558 	cid	= __le16_to_cpu(pkt->cid);
5559 	credits	= __le16_to_cpu(pkt->credits);
5560 
5561 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5562 
5563 	chan = l2cap_get_chan_by_dcid(conn, cid);
5564 	if (!chan)
5565 		return -EBADSLT;
5566 
5567 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5568 	if (credits > max_credits) {
5569 		BT_ERR("LE credits overflow");
5570 		l2cap_send_disconn_req(chan, ECONNRESET);
5571 		l2cap_chan_unlock(chan);
5572 
5573 		/* Return 0 so that we don't trigger an unnecessary
5574 		 * command reject packet.
5575 		 */
5576 		return 0;
5577 	}
5578 
5579 	chan->tx_credits += credits;
5580 
5581 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5582 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5583 		chan->tx_credits--;
5584 	}
5585 
5586 	if (chan->tx_credits)
5587 		chan->ops->resume(chan);
5588 
5589 	l2cap_chan_unlock(chan);
5590 
5591 	return 0;
5592 }
5593 
5594 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5595 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5596 				       u8 *data)
5597 {
5598 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5599 	struct l2cap_chan *chan;
5600 
5601 	if (cmd_len < sizeof(*rej))
5602 		return -EPROTO;
5603 
5604 	mutex_lock(&conn->chan_lock);
5605 
5606 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5607 	if (!chan)
5608 		goto done;
5609 
5610 	l2cap_chan_lock(chan);
5611 	l2cap_chan_del(chan, ECONNREFUSED);
5612 	l2cap_chan_unlock(chan);
5613 
5614 done:
5615 	mutex_unlock(&conn->chan_lock);
5616 	return 0;
5617 }
5618 
5619 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5620 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5621 				   u8 *data)
5622 {
5623 	int err = 0;
5624 
5625 	switch (cmd->code) {
5626 	case L2CAP_COMMAND_REJ:
5627 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5628 		break;
5629 
5630 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5631 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5632 		break;
5633 
5634 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5635 		break;
5636 
5637 	case L2CAP_LE_CONN_RSP:
5638 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5639 		break;
5640 
5641 	case L2CAP_LE_CONN_REQ:
5642 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5643 		break;
5644 
5645 	case L2CAP_LE_CREDITS:
5646 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5647 		break;
5648 
5649 	case L2CAP_DISCONN_REQ:
5650 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5651 		break;
5652 
5653 	case L2CAP_DISCONN_RSP:
5654 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5655 		break;
5656 
5657 	default:
5658 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5659 		err = -EINVAL;
5660 		break;
5661 	}
5662 
5663 	return err;
5664 }
5665 
5666 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5667 					struct sk_buff *skb)
5668 {
5669 	struct hci_conn *hcon = conn->hcon;
5670 	struct l2cap_cmd_hdr *cmd;
5671 	u16 len;
5672 	int err;
5673 
5674 	if (hcon->type != LE_LINK)
5675 		goto drop;
5676 
5677 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5678 		goto drop;
5679 
5680 	cmd = (void *) skb->data;
5681 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5682 
5683 	len = le16_to_cpu(cmd->len);
5684 
5685 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5686 
5687 	if (len != skb->len || !cmd->ident) {
5688 		BT_DBG("corrupted command");
5689 		goto drop;
5690 	}
5691 
5692 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5693 	if (err) {
5694 		struct l2cap_cmd_rej_unk rej;
5695 
5696 		BT_ERR("Wrong link type (%d)", err);
5697 
5698 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5699 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5700 			       sizeof(rej), &rej);
5701 	}
5702 
5703 drop:
5704 	kfree_skb(skb);
5705 }
5706 
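/* BR/EDR signaling channel (CID 0x0001).
 *
 * Unlike the LE signaling channel above, a single BR/EDR C-frame may
 * carry several commands back to back, so the loop below walks the
 * buffer one 4-byte command header (code, ident, len) at a time and
 * sends a Command Reject for anything it does not understand.
 */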
5707 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5708 				     struct sk_buff *skb)
5709 {
5710 	struct hci_conn *hcon = conn->hcon;
5711 	u8 *data = skb->data;
5712 	int len = skb->len;
5713 	struct l2cap_cmd_hdr cmd;
5714 	int err;
5715 
5716 	l2cap_raw_recv(conn, skb);
5717 
5718 	if (hcon->type != ACL_LINK)
5719 		goto drop;
5720 
5721 	while (len >= L2CAP_CMD_HDR_SIZE) {
5722 		u16 cmd_len;
5723 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5724 		data += L2CAP_CMD_HDR_SIZE;
5725 		len  -= L2CAP_CMD_HDR_SIZE;
5726 
5727 		cmd_len = le16_to_cpu(cmd.len);
5728 
5729 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5730 		       cmd.ident);
5731 
5732 		if (cmd_len > len || !cmd.ident) {
5733 			BT_DBG("corrupted command");
5734 			break;
5735 		}
5736 
5737 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5738 		if (err) {
5739 			struct l2cap_cmd_rej_unk rej;
5740 
5741 			BT_ERR("Wrong link type (%d)", err);
5742 
5743 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5744 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5745 				       sizeof(rej), &rej);
5746 		}
5747 
5748 		data += cmd_len;
5749 		len  -= cmd_len;
5750 	}
5751 
5752 drop:
5753 	kfree_skb(skb);
5754 }
5755 
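/* Frame Check Sequence validation for ERTM/streaming mode.
 *
 * When FCS is enabled the last two octets of the PDU hold a CRC-16
 * computed over the Basic L2CAP header, the control field and the
 * payload. Both header and control field have already been pulled
 * off the skb at this point, hence the "skb->data - hdr_size" rewind
 * below to include them in the checksum again.
 */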
5756 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5757 {
5758 	u16 our_fcs, rcv_fcs;
5759 	int hdr_size;
5760 
5761 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5762 		hdr_size = L2CAP_EXT_HDR_SIZE;
5763 	else
5764 		hdr_size = L2CAP_ENH_HDR_SIZE;
5765 
5766 	if (chan->fcs == L2CAP_FCS_CRC16) {
5767 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5768 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5769 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5770 
5771 		if (our_fcs != rcv_fcs)
5772 			return -EBADMSG;
5773 	}
5774 	return 0;
5775 }
5776 
5777 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5778 {
5779 	struct l2cap_ctrl control;
5780 
5781 	BT_DBG("chan %p", chan);
5782 
5783 	memset(&control, 0, sizeof(control));
5784 	control.sframe = 1;
5785 	control.final = 1;
5786 	control.reqseq = chan->buffer_seq;
5787 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5788 
5789 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5790 		control.super = L2CAP_SUPER_RNR;
5791 		l2cap_send_sframe(chan, &control);
5792 	}
5793 
5794 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5795 	    chan->unacked_frames > 0)
5796 		__set_retrans_timer(chan);
5797 
5798 	/* Send pending iframes */
5799 	l2cap_ertm_send(chan);
5800 
5801 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5802 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5803 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5804 		 * send it now.
5805 		 */
5806 		control.super = L2CAP_SUPER_RR;
5807 		l2cap_send_sframe(chan, &control);
5808 	}
5809 }
5810 
5811 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5812 			    struct sk_buff **last_frag)
5813 {
5814 	/* skb->len reflects data in skb as well as all fragments
5815 	 * skb->data_len reflects only data in fragments
5816 	 */
5817 	if (!skb_has_frag_list(skb))
5818 		skb_shinfo(skb)->frag_list = new_frag;
5819 
5820 	new_frag->next = NULL;
5821 
5822 	(*last_frag)->next = new_frag;
5823 	*last_frag = new_frag;
5824 
5825 	skb->len += new_frag->len;
5826 	skb->data_len += new_frag->len;
5827 	skb->truesize += new_frag->truesize;
5828 }
5829 
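/* ERTM/streaming SDU reassembly.
 *
 * An SDU larger than the MPS is split across several I-frames using
 * the SAR bits: a START frame carrying a 2-byte total SDU length,
 * zero or more CONTINUE frames, and a final END frame. Fragments are
 * chained onto chan->sdu with append_skb_frag() above, and the
 * complete SDU is handed to the channel's recv callback once its
 * length matches the advertised sdu_len.
 */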
5830 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5831 				struct l2cap_ctrl *control)
5832 {
5833 	int err = -EINVAL;
5834 
5835 	switch (control->sar) {
5836 	case L2CAP_SAR_UNSEGMENTED:
5837 		if (chan->sdu)
5838 			break;
5839 
5840 		err = chan->ops->recv(chan, skb);
5841 		break;
5842 
5843 	case L2CAP_SAR_START:
5844 		if (chan->sdu)
5845 			break;
5846 
5847 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5848 			break;
5849 
5850 		chan->sdu_len = get_unaligned_le16(skb->data);
5851 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5852 
5853 		if (chan->sdu_len > chan->imtu) {
5854 			err = -EMSGSIZE;
5855 			break;
5856 		}
5857 
5858 		if (skb->len >= chan->sdu_len)
5859 			break;
5860 
5861 		chan->sdu = skb;
5862 		chan->sdu_last_frag = skb;
5863 
5864 		skb = NULL;
5865 		err = 0;
5866 		break;
5867 
5868 	case L2CAP_SAR_CONTINUE:
5869 		if (!chan->sdu)
5870 			break;
5871 
5872 		append_skb_frag(chan->sdu, skb,
5873 				&chan->sdu_last_frag);
5874 		skb = NULL;
5875 
5876 		if (chan->sdu->len >= chan->sdu_len)
5877 			break;
5878 
5879 		err = 0;
5880 		break;
5881 
5882 	case L2CAP_SAR_END:
5883 		if (!chan->sdu)
5884 			break;
5885 
5886 		append_skb_frag(chan->sdu, skb,
5887 				&chan->sdu_last_frag);
5888 		skb = NULL;
5889 
5890 		if (chan->sdu->len != chan->sdu_len)
5891 			break;
5892 
5893 		err = chan->ops->recv(chan, chan->sdu);
5894 
5895 		if (!err) {
5896 			/* Reassembly complete */
5897 			chan->sdu = NULL;
5898 			chan->sdu_last_frag = NULL;
5899 			chan->sdu_len = 0;
5900 		}
5901 		break;
5902 	}
5903 
5904 	if (err) {
5905 		kfree_skb(skb);
5906 		kfree_skb(chan->sdu);
5907 		chan->sdu = NULL;
5908 		chan->sdu_last_frag = NULL;
5909 		chan->sdu_len = 0;
5910 	}
5911 
5912 	return err;
5913 }
5914 
5915 static int l2cap_resegment(struct l2cap_chan *chan)
5916 {
5917 	/* Placeholder */
5918 	return 0;
5919 }
5920 
5921 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5922 {
5923 	u8 event;
5924 
5925 	if (chan->mode != L2CAP_MODE_ERTM)
5926 		return;
5927 
5928 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5929 	l2cap_tx(chan, NULL, NULL, event);
5930 }
5931 
5932 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5933 {
5934 	int err = 0;
5935 	/* Pass sequential frames to l2cap_reassemble_sdu()
5936 	 * until a gap is encountered.
5937 	 */
5938 
5939 	BT_DBG("chan %p", chan);
5940 
5941 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5942 		struct sk_buff *skb;
5943 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5944 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5945 
5946 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5947 
5948 		if (!skb)
5949 			break;
5950 
5951 		skb_unlink(skb, &chan->srej_q);
5952 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5953 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5954 		if (err)
5955 			break;
5956 	}
5957 
5958 	if (skb_queue_empty(&chan->srej_q)) {
5959 		chan->rx_state = L2CAP_RX_STATE_RECV;
5960 		l2cap_send_ack(chan);
5961 	}
5962 
5963 	return err;
5964 }
5965 
5966 static void l2cap_handle_srej(struct l2cap_chan *chan,
5967 			      struct l2cap_ctrl *control)
5968 {
5969 	struct sk_buff *skb;
5970 
5971 	BT_DBG("chan %p, control %p", chan, control);
5972 
5973 	if (control->reqseq == chan->next_tx_seq) {
5974 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5975 		l2cap_send_disconn_req(chan, ECONNRESET);
5976 		return;
5977 	}
5978 
5979 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5980 
5981 	if (skb == NULL) {
5982 		BT_DBG("Seq %d not available for retransmission",
5983 		       control->reqseq);
5984 		return;
5985 	}
5986 
5987 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5988 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5989 		l2cap_send_disconn_req(chan, ECONNRESET);
5990 		return;
5991 	}
5992 
5993 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5994 
5995 	if (control->poll) {
5996 		l2cap_pass_to_tx(chan, control);
5997 
5998 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5999 		l2cap_retransmit(chan, control);
6000 		l2cap_ertm_send(chan);
6001 
6002 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6003 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6004 			chan->srej_save_reqseq = control->reqseq;
6005 		}
6006 	} else {
6007 		l2cap_pass_to_tx_fbit(chan, control);
6008 
6009 		if (control->final) {
6010 			if (chan->srej_save_reqseq != control->reqseq ||
6011 			    !test_and_clear_bit(CONN_SREJ_ACT,
6012 						&chan->conn_state))
6013 				l2cap_retransmit(chan, control);
6014 		} else {
6015 			l2cap_retransmit(chan, control);
6016 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6017 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6018 				chan->srej_save_reqseq = control->reqseq;
6019 			}
6020 		}
6021 	}
6022 }
6023 
6024 static void l2cap_handle_rej(struct l2cap_chan *chan,
6025 			     struct l2cap_ctrl *control)
6026 {
6027 	struct sk_buff *skb;
6028 
6029 	BT_DBG("chan %p, control %p", chan, control);
6030 
6031 	if (control->reqseq == chan->next_tx_seq) {
6032 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6033 		l2cap_send_disconn_req(chan, ECONNRESET);
6034 		return;
6035 	}
6036 
6037 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6038 
6039 	if (chan->max_tx && skb &&
6040 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6041 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6042 		l2cap_send_disconn_req(chan, ECONNRESET);
6043 		return;
6044 	}
6045 
6046 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6047 
6048 	l2cap_pass_to_tx(chan, control);
6049 
6050 	if (control->final) {
6051 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6052 			l2cap_retransmit_all(chan, control);
6053 	} else {
6054 		l2cap_retransmit_all(chan, control);
6055 		l2cap_ertm_send(chan);
6056 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6057 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6058 	}
6059 }
6060 
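/* Classify an incoming I-frame's TxSeq relative to the receive window.
 *
 * All comparisons are done modulo the sequence space; for example,
 * with 6-bit (enhanced control field) sequence numbers and
 * last_acked_seq == 60, a txseq of 2 sits at offset (2 - 60) mod 64
 * == 6 and is therefore inside a tx window of, say, 10 frames. The
 * possible results are EXPECTED, UNEXPECTED (a gap that needs SREJ),
 * DUPLICATE, the SREJ variants used while in the SREJ_SENT state,
 * and INVALID(_IGNORE).
 */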
6061 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6062 {
6063 	BT_DBG("chan %p, txseq %d", chan, txseq);
6064 
6065 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6066 	       chan->expected_tx_seq);
6067 
6068 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6069 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6070 		    chan->tx_win) {
6071 			/* See notes below regarding "double poll" and
6072 			 * invalid packets.
6073 			 */
6074 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6075 				BT_DBG("Invalid/Ignore - after SREJ");
6076 				return L2CAP_TXSEQ_INVALID_IGNORE;
6077 			} else {
6078 				BT_DBG("Invalid - in window after SREJ sent");
6079 				return L2CAP_TXSEQ_INVALID;
6080 			}
6081 		}
6082 
6083 		if (chan->srej_list.head == txseq) {
6084 			BT_DBG("Expected SREJ");
6085 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6086 		}
6087 
6088 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6089 			BT_DBG("Duplicate SREJ - txseq already stored");
6090 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6091 		}
6092 
6093 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6094 			BT_DBG("Unexpected SREJ - not requested");
6095 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6096 		}
6097 	}
6098 
6099 	if (chan->expected_tx_seq == txseq) {
6100 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6101 		    chan->tx_win) {
6102 			BT_DBG("Invalid - txseq outside tx window");
6103 			return L2CAP_TXSEQ_INVALID;
6104 		} else {
6105 			BT_DBG("Expected");
6106 			return L2CAP_TXSEQ_EXPECTED;
6107 		}
6108 	}
6109 
6110 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6111 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6112 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6113 		return L2CAP_TXSEQ_DUPLICATE;
6114 	}
6115 
6116 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6117 		/* A source of invalid packets is a "double poll" condition,
6118 		 * where delays cause us to send multiple poll packets.  If
6119 		 * the remote stack receives and processes both polls,
6120 		 * sequence numbers can wrap around in such a way that a
6121 		 * resent frame has a sequence number that looks like new data
6122 		 * with a sequence gap.  This would trigger an erroneous SREJ
6123 		 * request.
6124 		 *
6125 		 * Fortunately, this is impossible with a tx window that's
6126 		 * less than half of the maximum sequence number, which allows
6127 		 * invalid frames to be safely ignored.
6128 		 *
6129 		 * With tx window sizes greater than half of the tx window
6130 		 * maximum, the frame is invalid and cannot be ignored.  This
6131 		 * causes a disconnect.
6132 		 */
6133 
6134 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6135 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6136 			return L2CAP_TXSEQ_INVALID_IGNORE;
6137 		} else {
6138 			BT_DBG("Invalid - txseq outside tx window");
6139 			return L2CAP_TXSEQ_INVALID;
6140 		}
6141 	} else {
6142 		BT_DBG("Unexpected - txseq indicates missing frames");
6143 		return L2CAP_TXSEQ_UNEXPECTED;
6144 	}
6145 }
6146 
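/* RECV state handler of the ERTM receive state machine: I-frames that
 * arrive in order are reassembled and acknowledged, a sequence gap
 * queues the frame and moves the channel into SREJ_SENT, and the
 * RR/RNR/REJ/SREJ S-frames drive retransmission and the remote-busy
 * bookkeeping.
 */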
6147 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6148 			       struct l2cap_ctrl *control,
6149 			       struct sk_buff *skb, u8 event)
6150 {
6151 	int err = 0;
6152 	bool skb_in_use = false;
6153 
6154 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6155 	       event);
6156 
6157 	switch (event) {
6158 	case L2CAP_EV_RECV_IFRAME:
6159 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6160 		case L2CAP_TXSEQ_EXPECTED:
6161 			l2cap_pass_to_tx(chan, control);
6162 
6163 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6164 				BT_DBG("Busy, discarding expected seq %d",
6165 				       control->txseq);
6166 				break;
6167 			}
6168 
6169 			chan->expected_tx_seq = __next_seq(chan,
6170 							   control->txseq);
6171 
6172 			chan->buffer_seq = chan->expected_tx_seq;
6173 			skb_in_use = true;
6174 
6175 			err = l2cap_reassemble_sdu(chan, skb, control);
6176 			if (err)
6177 				break;
6178 
6179 			if (control->final) {
6180 				if (!test_and_clear_bit(CONN_REJ_ACT,
6181 							&chan->conn_state)) {
6182 					control->final = 0;
6183 					l2cap_retransmit_all(chan, control);
6184 					l2cap_ertm_send(chan);
6185 				}
6186 			}
6187 
6188 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6189 				l2cap_send_ack(chan);
6190 			break;
6191 		case L2CAP_TXSEQ_UNEXPECTED:
6192 			l2cap_pass_to_tx(chan, control);
6193 
6194 			/* Can't issue SREJ frames in the local busy state.
6195 			 * Drop this frame, it will be seen as missing
6196 			 * when local busy is exited.
6197 			 */
6198 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6199 				BT_DBG("Busy, discarding unexpected seq %d",
6200 				       control->txseq);
6201 				break;
6202 			}
6203 
6204 			/* There was a gap in the sequence, so an SREJ
6205 			 * must be sent for each missing frame.  The
6206 			 * current frame is stored for later use.
6207 			 */
6208 			skb_queue_tail(&chan->srej_q, skb);
6209 			skb_in_use = true;
6210 			BT_DBG("Queued %p (queue len %d)", skb,
6211 			       skb_queue_len(&chan->srej_q));
6212 
6213 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6214 			l2cap_seq_list_clear(&chan->srej_list);
6215 			l2cap_send_srej(chan, control->txseq);
6216 
6217 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6218 			break;
6219 		case L2CAP_TXSEQ_DUPLICATE:
6220 			l2cap_pass_to_tx(chan, control);
6221 			break;
6222 		case L2CAP_TXSEQ_INVALID_IGNORE:
6223 			break;
6224 		case L2CAP_TXSEQ_INVALID:
6225 		default:
6226 			l2cap_send_disconn_req(chan, ECONNRESET);
6227 			break;
6228 		}
6229 		break;
6230 	case L2CAP_EV_RECV_RR:
6231 		l2cap_pass_to_tx(chan, control);
6232 		if (control->final) {
6233 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6234 
6235 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6236 			    !__chan_is_moving(chan)) {
6237 				control->final = 0;
6238 				l2cap_retransmit_all(chan, control);
6239 			}
6240 
6241 			l2cap_ertm_send(chan);
6242 		} else if (control->poll) {
6243 			l2cap_send_i_or_rr_or_rnr(chan);
6244 		} else {
6245 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6246 					       &chan->conn_state) &&
6247 			    chan->unacked_frames)
6248 				__set_retrans_timer(chan);
6249 
6250 			l2cap_ertm_send(chan);
6251 		}
6252 		break;
6253 	case L2CAP_EV_RECV_RNR:
6254 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6255 		l2cap_pass_to_tx(chan, control);
6256 		if (control && control->poll) {
6257 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6258 			l2cap_send_rr_or_rnr(chan, 0);
6259 		}
6260 		__clear_retrans_timer(chan);
6261 		l2cap_seq_list_clear(&chan->retrans_list);
6262 		break;
6263 	case L2CAP_EV_RECV_REJ:
6264 		l2cap_handle_rej(chan, control);
6265 		break;
6266 	case L2CAP_EV_RECV_SREJ:
6267 		l2cap_handle_srej(chan, control);
6268 		break;
6269 	default:
6270 		break;
6271 	}
6272 
6273 	if (skb && !skb_in_use) {
6274 		BT_DBG("Freeing %p", skb);
6275 		kfree_skb(skb);
6276 	}
6277 
6278 	return err;
6279 }
6280 
6281 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6282 				    struct l2cap_ctrl *control,
6283 				    struct sk_buff *skb, u8 event)
6284 {
6285 	int err = 0;
6286 	u16 txseq = control->txseq;
6287 	bool skb_in_use = false;
6288 
6289 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6290 	       event);
6291 
6292 	switch (event) {
6293 	case L2CAP_EV_RECV_IFRAME:
6294 		switch (l2cap_classify_txseq(chan, txseq)) {
6295 		case L2CAP_TXSEQ_EXPECTED:
6296 			/* Keep frame for reassembly later */
6297 			l2cap_pass_to_tx(chan, control);
6298 			skb_queue_tail(&chan->srej_q, skb);
6299 			skb_in_use = true;
6300 			BT_DBG("Queued %p (queue len %d)", skb,
6301 			       skb_queue_len(&chan->srej_q));
6302 
6303 			chan->expected_tx_seq = __next_seq(chan, txseq);
6304 			break;
6305 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6306 			l2cap_seq_list_pop(&chan->srej_list);
6307 
6308 			l2cap_pass_to_tx(chan, control);
6309 			skb_queue_tail(&chan->srej_q, skb);
6310 			skb_in_use = true;
6311 			BT_DBG("Queued %p (queue len %d)", skb,
6312 			       skb_queue_len(&chan->srej_q));
6313 
6314 			err = l2cap_rx_queued_iframes(chan);
6315 			if (err)
6316 				break;
6317 
6318 			break;
6319 		case L2CAP_TXSEQ_UNEXPECTED:
6320 			/* Got a frame that can't be reassembled yet.
6321 			 * Save it for later, and send SREJs to cover
6322 			 * the missing frames.
6323 			 */
6324 			skb_queue_tail(&chan->srej_q, skb);
6325 			skb_in_use = true;
6326 			BT_DBG("Queued %p (queue len %d)", skb,
6327 			       skb_queue_len(&chan->srej_q));
6328 
6329 			l2cap_pass_to_tx(chan, control);
6330 			l2cap_send_srej(chan, control->txseq);
6331 			break;
6332 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6333 			/* This frame was requested with an SREJ, but
6334 			 * some expected retransmitted frames are
6335 			 * missing.  Request retransmission of missing
6336 			 * SREJ'd frames.
6337 			 */
6338 			skb_queue_tail(&chan->srej_q, skb);
6339 			skb_in_use = true;
6340 			BT_DBG("Queued %p (queue len %d)", skb,
6341 			       skb_queue_len(&chan->srej_q));
6342 
6343 			l2cap_pass_to_tx(chan, control);
6344 			l2cap_send_srej_list(chan, control->txseq);
6345 			break;
6346 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6347 			/* We've already queued this frame.  Drop this copy. */
6348 			l2cap_pass_to_tx(chan, control);
6349 			break;
6350 		case L2CAP_TXSEQ_DUPLICATE:
6351 			/* Expecting a later sequence number, so this frame
6352 			 * was already received.  Ignore it completely.
6353 			 */
6354 			break;
6355 		case L2CAP_TXSEQ_INVALID_IGNORE:
6356 			break;
6357 		case L2CAP_TXSEQ_INVALID:
6358 		default:
6359 			l2cap_send_disconn_req(chan, ECONNRESET);
6360 			break;
6361 		}
6362 		break;
6363 	case L2CAP_EV_RECV_RR:
6364 		l2cap_pass_to_tx(chan, control);
6365 		if (control->final) {
6366 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6367 
6368 			if (!test_and_clear_bit(CONN_REJ_ACT,
6369 						&chan->conn_state)) {
6370 				control->final = 0;
6371 				l2cap_retransmit_all(chan, control);
6372 			}
6373 
6374 			l2cap_ertm_send(chan);
6375 		} else if (control->poll) {
6376 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6377 					       &chan->conn_state) &&
6378 			    chan->unacked_frames) {
6379 				__set_retrans_timer(chan);
6380 			}
6381 
6382 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6383 			l2cap_send_srej_tail(chan);
6384 		} else {
6385 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6386 					       &chan->conn_state) &&
6387 			    chan->unacked_frames)
6388 				__set_retrans_timer(chan);
6389 
6390 			l2cap_send_ack(chan);
6391 		}
6392 		break;
6393 	case L2CAP_EV_RECV_RNR:
6394 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6395 		l2cap_pass_to_tx(chan, control);
6396 		if (control->poll) {
6397 			l2cap_send_srej_tail(chan);
6398 		} else {
6399 			struct l2cap_ctrl rr_control;
6400 			memset(&rr_control, 0, sizeof(rr_control));
6401 			rr_control.sframe = 1;
6402 			rr_control.super = L2CAP_SUPER_RR;
6403 			rr_control.reqseq = chan->buffer_seq;
6404 			l2cap_send_sframe(chan, &rr_control);
6405 		}
6406 
6407 		break;
6408 	case L2CAP_EV_RECV_REJ:
6409 		l2cap_handle_rej(chan, control);
6410 		break;
6411 	case L2CAP_EV_RECV_SREJ:
6412 		l2cap_handle_srej(chan, control);
6413 		break;
6414 	}
6415 
6416 	if (skb && !skb_in_use) {
6417 		BT_DBG("Freeing %p", skb);
6418 		kfree_skb(skb);
6419 	}
6420 
6421 	return err;
6422 }
6423 
6424 static int l2cap_finish_move(struct l2cap_chan *chan)
6425 {
6426 	BT_DBG("chan %p", chan);
6427 
6428 	chan->rx_state = L2CAP_RX_STATE_RECV;
6429 
6430 	if (chan->hs_hcon)
6431 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6432 	else
6433 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6434 
6435 	return l2cap_resegment(chan);
6436 }
6437 
6438 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6439 				 struct l2cap_ctrl *control,
6440 				 struct sk_buff *skb, u8 event)
6441 {
6442 	int err;
6443 
6444 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6445 	       event);
6446 
6447 	if (!control->poll)
6448 		return -EPROTO;
6449 
6450 	l2cap_process_reqseq(chan, control->reqseq);
6451 
6452 	if (!skb_queue_empty(&chan->tx_q))
6453 		chan->tx_send_head = skb_peek(&chan->tx_q);
6454 	else
6455 		chan->tx_send_head = NULL;
6456 
6457 	/* Rewind next_tx_seq to the point expected
6458 	 * by the receiver.
6459 	 */
6460 	chan->next_tx_seq = control->reqseq;
6461 	chan->unacked_frames = 0;
6462 
6463 	err = l2cap_finish_move(chan);
6464 	if (err)
6465 		return err;
6466 
6467 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6468 	l2cap_send_i_or_rr_or_rnr(chan);
6469 
6470 	if (event == L2CAP_EV_RECV_IFRAME)
6471 		return -EPROTO;
6472 
6473 	return l2cap_rx_state_recv(chan, control, NULL, event);
6474 }
6475 
6476 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6477 				 struct l2cap_ctrl *control,
6478 				 struct sk_buff *skb, u8 event)
6479 {
6480 	int err;
6481 
6482 	if (!control->final)
6483 		return -EPROTO;
6484 
6485 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6486 
6487 	chan->rx_state = L2CAP_RX_STATE_RECV;
6488 	l2cap_process_reqseq(chan, control->reqseq);
6489 
6490 	if (!skb_queue_empty(&chan->tx_q))
6491 		chan->tx_send_head = skb_peek(&chan->tx_q);
6492 	else
6493 		chan->tx_send_head = NULL;
6494 
6495 	/* Rewind next_tx_seq to the point expected
6496 	 * by the receiver.
6497 	 */
6498 	chan->next_tx_seq = control->reqseq;
6499 	chan->unacked_frames = 0;
6500 
6501 	if (chan->hs_hcon)
6502 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6503 	else
6504 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6505 
6506 	err = l2cap_resegment(chan);
6507 
6508 	if (!err)
6509 		err = l2cap_rx_state_recv(chan, control, skb, event);
6510 
6511 	return err;
6512 }
6513 
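/* ReqSeq sanity check.
 *
 * A valid acknowledgement may only refer to frames that have actually
 * been sent but not yet acked. For example, with next_tx_seq == 10
 * and expected_ack_seq == 5 there are 5 unacked frames, so reqseq 7
 * (offset 3 from next_tx_seq) is accepted while reqseq 3 (offset 7)
 * is rejected and leads to a disconnect in l2cap_rx() below.
 */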
6514 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6515 {
6516 	/* Make sure reqseq is for a packet that has been sent but not acked */
6517 	u16 unacked;
6518 
6519 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6520 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6521 }
6522 
6523 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6524 		    struct sk_buff *skb, u8 event)
6525 {
6526 	int err = 0;
6527 
6528 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6529 	       control, skb, event, chan->rx_state);
6530 
6531 	if (__valid_reqseq(chan, control->reqseq)) {
6532 		switch (chan->rx_state) {
6533 		case L2CAP_RX_STATE_RECV:
6534 			err = l2cap_rx_state_recv(chan, control, skb, event);
6535 			break;
6536 		case L2CAP_RX_STATE_SREJ_SENT:
6537 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6538 						       event);
6539 			break;
6540 		case L2CAP_RX_STATE_WAIT_P:
6541 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6542 			break;
6543 		case L2CAP_RX_STATE_WAIT_F:
6544 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6545 			break;
6546 		default:
6547 			/* shut it down */
6548 			break;
6549 		}
6550 	} else {
6551 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6552 		       control->reqseq, chan->next_tx_seq,
6553 		       chan->expected_ack_seq);
6554 		l2cap_send_disconn_req(chan, ECONNRESET);
6555 	}
6556 
6557 	return err;
6558 }
6559 
6560 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6561 			   struct sk_buff *skb)
6562 {
6563 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6564 	       chan->rx_state);
6565 
6566 	if (l2cap_classify_txseq(chan, control->txseq) ==
6567 	    L2CAP_TXSEQ_EXPECTED) {
6568 		l2cap_pass_to_tx(chan, control);
6569 
6570 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6571 		       __next_seq(chan, chan->buffer_seq));
6572 
6573 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6574 
6575 		l2cap_reassemble_sdu(chan, skb, control);
6576 	} else {
6577 		if (chan->sdu) {
6578 			kfree_skb(chan->sdu);
6579 			chan->sdu = NULL;
6580 		}
6581 		chan->sdu_last_frag = NULL;
6582 		chan->sdu_len = 0;
6583 
6584 		if (skb) {
6585 			BT_DBG("Freeing %p", skb);
6586 			kfree_skb(skb);
6587 		}
6588 	}
6589 
6590 	chan->last_acked_seq = control->txseq;
6591 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6592 
6593 	return 0;
6594 }
6595 
6596 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6597 {
6598 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6599 	u16 len;
6600 	u8 event;
6601 
6602 	__unpack_control(chan, skb);
6603 
6604 	len = skb->len;
6605 
6606 	/*
6607 	 * We can just drop the corrupted I-frame here.
6608 	 * The receive state machine will treat it as a missing frame,
6609 	 * start the proper recovery procedure and ask for retransmission.
6610 	 */
6611 	if (l2cap_check_fcs(chan, skb))
6612 		goto drop;
6613 
6614 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6615 		len -= L2CAP_SDULEN_SIZE;
6616 
6617 	if (chan->fcs == L2CAP_FCS_CRC16)
6618 		len -= L2CAP_FCS_SIZE;
6619 
6620 	if (len > chan->mps) {
6621 		l2cap_send_disconn_req(chan, ECONNRESET);
6622 		goto drop;
6623 	}
6624 
6625 	if ((chan->mode == L2CAP_MODE_ERTM ||
6626 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6627 		goto drop;
6628 
6629 	if (!control->sframe) {
6630 		int err;
6631 
6632 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6633 		       control->sar, control->reqseq, control->final,
6634 		       control->txseq);
6635 
6636 		/* Validate F-bit - F=0 always valid, F=1 only
6637 		 * valid in TX WAIT_F
6638 		 */
6639 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6640 			goto drop;
6641 
6642 		if (chan->mode != L2CAP_MODE_STREAMING) {
6643 			event = L2CAP_EV_RECV_IFRAME;
6644 			err = l2cap_rx(chan, control, skb, event);
6645 		} else {
6646 			err = l2cap_stream_rx(chan, control, skb);
6647 		}
6648 
6649 		if (err)
6650 			l2cap_send_disconn_req(chan, ECONNRESET);
6651 	} else {
6652 		const u8 rx_func_to_event[4] = {
6653 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6654 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6655 		};
6656 
6657 		/* Only I-frames are expected in streaming mode */
6658 		if (chan->mode == L2CAP_MODE_STREAMING)
6659 			goto drop;
6660 
6661 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6662 		       control->reqseq, control->final, control->poll,
6663 		       control->super);
6664 
6665 		if (len != 0) {
6666 			BT_ERR("Trailing bytes: %d in sframe", len);
6667 			l2cap_send_disconn_req(chan, ECONNRESET);
6668 			goto drop;
6669 		}
6670 
6671 		/* Validate F and P bits */
6672 		if (control->final && (control->poll ||
6673 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6674 			goto drop;
6675 
6676 		event = rx_func_to_event[control->super];
6677 		if (l2cap_rx(chan, control, skb, event))
6678 			l2cap_send_disconn_req(chan, ECONNRESET);
6679 	}
6680 
6681 	return 0;
6682 
6683 drop:
6684 	kfree_skb(skb);
6685 	return 0;
6686 }
6687 
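/* Return LE flow control credits to the remote device.
 *
 * Credits are only replenished once the channel has consumed more
 * than half of its initial allotment. As an illustration, with a
 * maximum of 10 credits nothing is sent while rx_credits >= 5; once
 * it drops to 4 the remote is granted 6 credits, topping the channel
 * back up to the maximum.
 */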
6688 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6689 {
6690 	struct l2cap_conn *conn = chan->conn;
6691 	struct l2cap_le_credits pkt;
6692 	u16 return_credits;
6693 
6694 	/* We return more credits to the sender only after the amount of
6695 	 * credits falls below half of the initial amount.
6696 	 */
6697 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6698 		return;
6699 
6700 	return_credits = le_max_credits - chan->rx_credits;
6701 
6702 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6703 
6704 	chan->rx_credits += return_credits;
6705 
6706 	pkt.cid     = cpu_to_le16(chan->scid);
6707 	pkt.credits = cpu_to_le16(return_credits);
6708 
6709 	chan->ident = l2cap_get_ident(conn);
6710 
6711 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6712 }
6713 
6714 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6715 {
6716 	int err;
6717 
6718 	if (!chan->rx_credits) {
6719 		BT_ERR("No credits to receive LE L2CAP data");
6720 		l2cap_send_disconn_req(chan, ECONNRESET);
6721 		return -ENOBUFS;
6722 	}
6723 
6724 	if (chan->imtu < skb->len) {
6725 		BT_ERR("Too big LE L2CAP PDU");
6726 		return -ENOBUFS;
6727 	}
6728 
6729 	chan->rx_credits--;
6730 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6731 
6732 	l2cap_chan_le_send_credits(chan);
6733 
6734 	err = 0;
6735 
6736 	if (!chan->sdu) {
6737 		u16 sdu_len;
6738 
6739 		sdu_len = get_unaligned_le16(skb->data);
6740 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6741 
6742 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6743 		       sdu_len, skb->len, chan->imtu);
6744 
6745 		if (sdu_len > chan->imtu) {
6746 			BT_ERR("Too big LE L2CAP SDU length received");
6747 			err = -EMSGSIZE;
6748 			goto failed;
6749 		}
6750 
6751 		if (skb->len > sdu_len) {
6752 			BT_ERR("Too much LE L2CAP data received");
6753 			err = -EINVAL;
6754 			goto failed;
6755 		}
6756 
6757 		if (skb->len == sdu_len)
6758 			return chan->ops->recv(chan, skb);
6759 
6760 		chan->sdu = skb;
6761 		chan->sdu_len = sdu_len;
6762 		chan->sdu_last_frag = skb;
6763 
6764 		return 0;
6765 	}
6766 
6767 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6768 	       chan->sdu->len, skb->len, chan->sdu_len);
6769 
6770 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6771 		BT_ERR("Too much LE L2CAP data received");
6772 		err = -EINVAL;
6773 		goto failed;
6774 	}
6775 
6776 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6777 	skb = NULL;
6778 
6779 	if (chan->sdu->len == chan->sdu_len) {
6780 		err = chan->ops->recv(chan, chan->sdu);
6781 		if (!err) {
6782 			chan->sdu = NULL;
6783 			chan->sdu_last_frag = NULL;
6784 			chan->sdu_len = 0;
6785 		}
6786 	}
6787 
6788 failed:
6789 	if (err) {
6790 		kfree_skb(skb);
6791 		kfree_skb(chan->sdu);
6792 		chan->sdu = NULL;
6793 		chan->sdu_last_frag = NULL;
6794 		chan->sdu_len = 0;
6795 	}
6796 
6797 	/* We can't return an error here since we took care of the skb
6798 	 * freeing internally. An error return would cause the caller to
6799 	 * do a double-free of the skb.
6800 	 */
6801 	return 0;
6802 }
6803 
6804 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6805 			       struct sk_buff *skb)
6806 {
6807 	struct l2cap_chan *chan;
6808 
6809 	chan = l2cap_get_chan_by_scid(conn, cid);
6810 	if (!chan) {
6811 		if (cid == L2CAP_CID_A2MP) {
6812 			chan = a2mp_channel_create(conn, skb);
6813 			if (!chan) {
6814 				kfree_skb(skb);
6815 				return;
6816 			}
6817 
6818 			l2cap_chan_lock(chan);
6819 		} else {
6820 			BT_DBG("unknown cid 0x%4.4x", cid);
6821 			/* Drop packet and return */
6822 			kfree_skb(skb);
6823 			return;
6824 		}
6825 	}
6826 
6827 	BT_DBG("chan %p, len %d", chan, skb->len);
6828 
6829 	/* If we receive data on a fixed channel before the info req/rsp
6830 	 * procedure is done, simply assume that the channel is supported
6831 	 * and mark it as ready.
6832 	 */
6833 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6834 		l2cap_chan_ready(chan);
6835 
6836 	if (chan->state != BT_CONNECTED)
6837 		goto drop;
6838 
6839 	switch (chan->mode) {
6840 	case L2CAP_MODE_LE_FLOWCTL:
6841 		if (l2cap_le_data_rcv(chan, skb) < 0)
6842 			goto drop;
6843 
6844 		goto done;
6845 
6846 	case L2CAP_MODE_BASIC:
6847 		/* If the socket receive buffer overflows we drop data here,
6848 		 * which is *bad* because L2CAP has to be reliable.
6849 		 * But we don't have any other choice: L2CAP doesn't
6850 		 * provide a flow control mechanism. */
6851 
6852 		if (chan->imtu < skb->len) {
6853 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6854 			goto drop;
6855 		}
6856 
6857 		if (!chan->ops->recv(chan, skb))
6858 			goto done;
6859 		break;
6860 
6861 	case L2CAP_MODE_ERTM:
6862 	case L2CAP_MODE_STREAMING:
6863 		l2cap_data_rcv(chan, skb);
6864 		goto done;
6865 
6866 	default:
6867 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6868 		break;
6869 	}
6870 
6871 drop:
6872 	kfree_skb(skb);
6873 
6874 done:
6875 	l2cap_chan_unlock(chan);
6876 }
6877 
6878 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6879 				  struct sk_buff *skb)
6880 {
6881 	struct hci_conn *hcon = conn->hcon;
6882 	struct l2cap_chan *chan;
6883 
6884 	if (hcon->type != ACL_LINK)
6885 		goto free_skb;
6886 
6887 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6888 					ACL_LINK);
6889 	if (!chan)
6890 		goto free_skb;
6891 
6892 	BT_DBG("chan %p, len %d", chan, skb->len);
6893 
6894 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6895 		goto drop;
6896 
6897 	if (chan->imtu < skb->len)
6898 		goto drop;
6899 
6900 	/* Store remote BD_ADDR and PSM for msg_name */
6901 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6902 	bt_cb(skb)->l2cap.psm = psm;
6903 
6904 	if (!chan->ops->recv(chan, skb)) {
6905 		l2cap_chan_put(chan);
6906 		return;
6907 	}
6908 
6909 drop:
6910 	l2cap_chan_put(chan);
6911 free_skb:
6912 	kfree_skb(skb);
6913 }
6914 
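/* Demultiplex a complete L2CAP frame by channel ID.
 *
 * CID 0x0001 is the BR/EDR signaling channel, 0x0002 the
 * connectionless channel addressed by PSM, 0x0005 the LE signaling
 * channel; everything else is handled as (fixed or dynamically
 * allocated) channel data. Frames arriving before the HCI connection
 * is fully established are parked on the pending_rx queue.
 */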
6915 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6916 {
6917 	struct l2cap_hdr *lh = (void *) skb->data;
6918 	struct hci_conn *hcon = conn->hcon;
6919 	u16 cid, len;
6920 	__le16 psm;
6921 
6922 	if (hcon->state != BT_CONNECTED) {
6923 		BT_DBG("queueing pending rx skb");
6924 		skb_queue_tail(&conn->pending_rx, skb);
6925 		return;
6926 	}
6927 
6928 	skb_pull(skb, L2CAP_HDR_SIZE);
6929 	cid = __le16_to_cpu(lh->cid);
6930 	len = __le16_to_cpu(lh->len);
6931 
6932 	if (len != skb->len) {
6933 		kfree_skb(skb);
6934 		return;
6935 	}
6936 
6937 	/* Since we can't actively block incoming LE connections we must
6938 	 * at least ensure that we ignore incoming data from them.
6939 	 */
6940 	if (hcon->type == LE_LINK &&
6941 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6942 				   bdaddr_dst_type(hcon))) {
6943 		kfree_skb(skb);
6944 		return;
6945 	}
6946 
6947 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6948 
6949 	switch (cid) {
6950 	case L2CAP_CID_SIGNALING:
6951 		l2cap_sig_channel(conn, skb);
6952 		break;
6953 
6954 	case L2CAP_CID_CONN_LESS:
6955 		psm = get_unaligned((__le16 *) skb->data);
6956 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6957 		l2cap_conless_channel(conn, psm, skb);
6958 		break;
6959 
6960 	case L2CAP_CID_LE_SIGNALING:
6961 		l2cap_le_sig_channel(conn, skb);
6962 		break;
6963 
6964 	default:
6965 		l2cap_data_channel(conn, cid, skb);
6966 		break;
6967 	}
6968 }
6969 
6970 static void process_pending_rx(struct work_struct *work)
6971 {
6972 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6973 					       pending_rx_work);
6974 	struct sk_buff *skb;
6975 
6976 	BT_DBG("");
6977 
6978 	while ((skb = skb_dequeue(&conn->pending_rx)))
6979 		l2cap_recv_frame(conn, skb);
6980 }
6981 
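/* Allocate (or reuse) the per-hci_conn L2CAP connection state.
 *
 * The connection MTU follows the controller: le_mtu for LE links when
 * the controller reports one, otherwise the ACL MTU. The fixed-channel
 * mask advertised to the peer (signaling, connectionless, optionally
 * A2MP and BR/EDR SMP) is derived from the controller's capability
 * flags below.
 */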
6982 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6983 {
6984 	struct l2cap_conn *conn = hcon->l2cap_data;
6985 	struct hci_chan *hchan;
6986 
6987 	if (conn)
6988 		return conn;
6989 
6990 	hchan = hci_chan_create(hcon);
6991 	if (!hchan)
6992 		return NULL;
6993 
6994 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6995 	if (!conn) {
6996 		hci_chan_del(hchan);
6997 		return NULL;
6998 	}
6999 
7000 	kref_init(&conn->ref);
7001 	hcon->l2cap_data = conn;
7002 	conn->hcon = hci_conn_get(hcon);
7003 	conn->hchan = hchan;
7004 
7005 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7006 
7007 	switch (hcon->type) {
7008 	case LE_LINK:
7009 		if (hcon->hdev->le_mtu) {
7010 			conn->mtu = hcon->hdev->le_mtu;
7011 			break;
7012 		}
7013 		/* fall through */
7014 	default:
7015 		conn->mtu = hcon->hdev->acl_mtu;
7016 		break;
7017 	}
7018 
7019 	conn->feat_mask = 0;
7020 
7021 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7022 
7023 	if (hcon->type == ACL_LINK &&
7024 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7025 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7026 
7027 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7028 	    (bredr_sc_enabled(hcon->hdev) ||
7029 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7030 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7031 
7032 	mutex_init(&conn->ident_lock);
7033 	mutex_init(&conn->chan_lock);
7034 
7035 	INIT_LIST_HEAD(&conn->chan_l);
7036 	INIT_LIST_HEAD(&conn->users);
7037 
7038 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7039 
7040 	skb_queue_head_init(&conn->pending_rx);
7041 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7042 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7043 
7044 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7045 
7046 	return conn;
7047 }
7048 
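/* PSM validity rules differ between transports: LE PSMs are a single
 * octet (0x0001-0x00ff), while BR/EDR PSMs must have an odd least
 * significant octet and an even least significant bit in the upper
 * octet, so e.g. 0x0001 and 0x1003 are valid but 0x0002 and 0x0101
 * are not.
 */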
7049 static bool is_valid_psm(u16 psm, u8 dst_type) {
7050 	if (!psm)
7051 		return false;
7052 
7053 	if (bdaddr_type_is_le(dst_type))
7054 		return (psm <= 0x00ff);
7055 
7056 	/* PSM must be odd and lsb of upper byte must be 0 */
7057 	return ((psm & 0x0101) == 0x0001);
7058 }
7059 
7060 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7061 		       bdaddr_t *dst, u8 dst_type)
7062 {
7063 	struct l2cap_conn *conn;
7064 	struct hci_conn *hcon;
7065 	struct hci_dev *hdev;
7066 	int err;
7067 
7068 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7069 	       dst_type, __le16_to_cpu(psm));
7070 
7071 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7072 	if (!hdev)
7073 		return -EHOSTUNREACH;
7074 
7075 	hci_dev_lock(hdev);
7076 
7077 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7078 	    chan->chan_type != L2CAP_CHAN_RAW) {
7079 		err = -EINVAL;
7080 		goto done;
7081 	}
7082 
7083 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7084 		err = -EINVAL;
7085 		goto done;
7086 	}
7087 
7088 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7089 		err = -EINVAL;
7090 		goto done;
7091 	}
7092 
7093 	switch (chan->mode) {
7094 	case L2CAP_MODE_BASIC:
7095 		break;
7096 	case L2CAP_MODE_LE_FLOWCTL:
7097 		l2cap_le_flowctl_init(chan);
7098 		break;
7099 	case L2CAP_MODE_ERTM:
7100 	case L2CAP_MODE_STREAMING:
7101 		if (!disable_ertm)
7102 			break;
7103 		/* fall through */
7104 	default:
7105 		err = -EOPNOTSUPP;
7106 		goto done;
7107 	}
7108 
7109 	switch (chan->state) {
7110 	case BT_CONNECT:
7111 	case BT_CONNECT2:
7112 	case BT_CONFIG:
7113 		/* Already connecting */
7114 		err = 0;
7115 		goto done;
7116 
7117 	case BT_CONNECTED:
7118 		/* Already connected */
7119 		err = -EISCONN;
7120 		goto done;
7121 
7122 	case BT_OPEN:
7123 	case BT_BOUND:
7124 		/* Can connect */
7125 		break;
7126 
7127 	default:
7128 		err = -EBADFD;
7129 		goto done;
7130 	}
7131 
7132 	/* Set destination address and psm */
7133 	bacpy(&chan->dst, dst);
7134 	chan->dst_type = dst_type;
7135 
7136 	chan->psm = psm;
7137 	chan->dcid = cid;
7138 
7139 	if (bdaddr_type_is_le(dst_type)) {
7140 		/* Convert from L2CAP channel address type to HCI address type
7141 		 */
7142 		if (dst_type == BDADDR_LE_PUBLIC)
7143 			dst_type = ADDR_LE_DEV_PUBLIC;
7144 		else
7145 			dst_type = ADDR_LE_DEV_RANDOM;
7146 
7147 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7148 			hcon = hci_connect_le(hdev, dst, dst_type,
7149 					      chan->sec_level,
7150 					      HCI_LE_CONN_TIMEOUT,
7151 					      HCI_ROLE_SLAVE, NULL);
7152 		else
7153 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7154 						   chan->sec_level,
7155 						   HCI_LE_CONN_TIMEOUT);
7156 
7157 	} else {
7158 		u8 auth_type = l2cap_get_auth_type(chan);
7159 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7160 	}
7161 
7162 	if (IS_ERR(hcon)) {
7163 		err = PTR_ERR(hcon);
7164 		goto done;
7165 	}
7166 
7167 	conn = l2cap_conn_add(hcon);
7168 	if (!conn) {
7169 		hci_conn_drop(hcon);
7170 		err = -ENOMEM;
7171 		goto done;
7172 	}
7173 
7174 	mutex_lock(&conn->chan_lock);
7175 	l2cap_chan_lock(chan);
7176 
7177 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7178 		hci_conn_drop(hcon);
7179 		err = -EBUSY;
7180 		goto chan_unlock;
7181 	}
7182 
7183 	/* Update source addr of the socket */
7184 	bacpy(&chan->src, &hcon->src);
7185 	chan->src_type = bdaddr_src_type(hcon);
7186 
7187 	__l2cap_chan_add(conn, chan);
7188 
7189 	/* l2cap_chan_add takes its own ref so we can drop this one */
7190 	hci_conn_drop(hcon);
7191 
7192 	l2cap_state_change(chan, BT_CONNECT);
7193 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7194 
7195 	/* Release chan->sport so that it can be reused by other
7196 	 * sockets (as it's only used for listening sockets).
7197 	 */
7198 	write_lock(&chan_list_lock);
7199 	chan->sport = 0;
7200 	write_unlock(&chan_list_lock);
7201 
7202 	if (hcon->state == BT_CONNECTED) {
7203 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7204 			__clear_chan_timer(chan);
7205 			if (l2cap_chan_check_security(chan, true))
7206 				l2cap_state_change(chan, BT_CONNECTED);
7207 		} else
7208 			l2cap_do_start(chan);
7209 	}
7210 
7211 	err = 0;
7212 
7213 chan_unlock:
7214 	l2cap_chan_unlock(chan);
7215 	mutex_unlock(&conn->chan_lock);
7216 done:
7217 	hci_dev_unlock(hdev);
7218 	hci_dev_put(hdev);
7219 	return err;
7220 }
7221 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7222 
7223 /* ---- L2CAP interface with lower layer (HCI) ---- */
7224 
7225 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7226 {
7227 	int exact = 0, lm1 = 0, lm2 = 0;
7228 	struct l2cap_chan *c;
7229 
7230 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7231 
7232 	/* Find listening sockets and check their link_mode */
7233 	read_lock(&chan_list_lock);
7234 	list_for_each_entry(c, &chan_list, global_l) {
7235 		if (c->state != BT_LISTEN)
7236 			continue;
7237 
7238 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7239 			lm1 |= HCI_LM_ACCEPT;
7240 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7241 				lm1 |= HCI_LM_MASTER;
7242 			exact++;
7243 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7244 			lm2 |= HCI_LM_ACCEPT;
7245 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7246 				lm2 |= HCI_LM_MASTER;
7247 		}
7248 	}
7249 	read_unlock(&chan_list_lock);
7250 
7251 	return exact ? lm1 : lm2;
7252 }
7253 
7254 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7255  * from an existing channel in the list or from the beginning of the
7256  * global list (by passing NULL as first parameter).
7257  */
7258 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7259 						  struct hci_conn *hcon)
7260 {
7261 	u8 src_type = bdaddr_src_type(hcon);
7262 
7263 	read_lock(&chan_list_lock);
7264 
7265 	if (c)
7266 		c = list_next_entry(c, global_l);
7267 	else
7268 		c = list_entry(chan_list.next, typeof(*c), global_l);
7269 
7270 	list_for_each_entry_from(c, &chan_list, global_l) {
7271 		if (c->chan_type != L2CAP_CHAN_FIXED)
7272 			continue;
7273 		if (c->state != BT_LISTEN)
7274 			continue;
7275 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7276 			continue;
7277 		if (src_type != c->src_type)
7278 			continue;
7279 
7280 		l2cap_chan_hold(c);
7281 		read_unlock(&chan_list_lock);
7282 		return c;
7283 	}
7284 
7285 	read_unlock(&chan_list_lock);
7286 
7287 	return NULL;
7288 }
7289 
7290 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7291 {
7292 	struct hci_dev *hdev = hcon->hdev;
7293 	struct l2cap_conn *conn;
7294 	struct l2cap_chan *pchan;
7295 	u8 dst_type;
7296 
7297 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7298 		return;
7299 
7300 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7301 
7302 	if (status) {
7303 		l2cap_conn_del(hcon, bt_to_errno(status));
7304 		return;
7305 	}
7306 
7307 	conn = l2cap_conn_add(hcon);
7308 	if (!conn)
7309 		return;
7310 
7311 	dst_type = bdaddr_dst_type(hcon);
7312 
7313 	/* If device is blocked, do not create channels for it */
7314 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7315 		return;
7316 
7317 	/* Find fixed channels and notify them of the new connection. We
7318 	 * use multiple individual lookups, continuing each time where
7319 	 * we left off, because the list lock would prevent calling the
7320 	 * potentially sleeping l2cap_chan_lock() function.
7321 	 */
7322 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7323 	while (pchan) {
7324 		struct l2cap_chan *chan, *next;
7325 
7326 		/* Client fixed channels should override server ones */
7327 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7328 			goto next;
7329 
7330 		l2cap_chan_lock(pchan);
7331 		chan = pchan->ops->new_connection(pchan);
7332 		if (chan) {
7333 			bacpy(&chan->src, &hcon->src);
7334 			bacpy(&chan->dst, &hcon->dst);
7335 			chan->src_type = bdaddr_src_type(hcon);
7336 			chan->dst_type = dst_type;
7337 
7338 			__l2cap_chan_add(conn, chan);
7339 		}
7340 
7341 		l2cap_chan_unlock(pchan);
7342 next:
7343 		next = l2cap_global_fixed_chan(pchan, hcon);
7344 		l2cap_chan_put(pchan);
7345 		pchan = next;
7346 	}
7347 
7348 	l2cap_conn_ready(conn);
7349 }
7350 
7351 int l2cap_disconn_ind(struct hci_conn *hcon)
7352 {
7353 	struct l2cap_conn *conn = hcon->l2cap_data;
7354 
7355 	BT_DBG("hcon %p", hcon);
7356 
7357 	if (!conn)
7358 		return HCI_ERROR_REMOTE_USER_TERM;
7359 	return conn->disc_reason;
7360 }
7361 
7362 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7363 {
7364 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7365 		return;
7366 
7367 	BT_DBG("hcon %p reason %d", hcon, reason);
7368 
7369 	l2cap_conn_del(hcon, bt_to_errno(reason));
7370 }
7371 
7372 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7373 {
7374 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7375 		return;
7376 
7377 	if (encrypt == 0x00) {
7378 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7379 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7380 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7381 			   chan->sec_level == BT_SECURITY_FIPS)
7382 			l2cap_chan_close(chan, ECONNREFUSED);
7383 	} else {
7384 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7385 			__clear_chan_timer(chan);
7386 	}
7387 }
7388 
7389 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7390 {
7391 	struct l2cap_conn *conn = hcon->l2cap_data;
7392 	struct l2cap_chan *chan;
7393 
7394 	if (!conn)
7395 		return;
7396 
7397 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7398 
7399 	mutex_lock(&conn->chan_lock);
7400 
	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

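/* Entry point for incoming ACL data from the HCI core.  A single
 * L2CAP PDU may arrive split over several ACL packets, so start
 * fragments are buffered in conn->rx_skb and continuation fragments
 * are appended until the length announced in the Basic L2CAP header
 * has been collected, at which point the PDU is handed to
 * l2cap_recv_frame().
 */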
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* Do not create an L2CAP connection for AMP controllers */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

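	/* ACL_START/ACL_START_NO_FLUSH/ACL_COMPLETE mark the first (or
	 * only) fragment of a PDU, ACL_CONT marks a continuation of the
	 * PDU currently being reassembled in conn->rx_skb.
	 */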
	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* A start fragment always begins with the Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received.  l2cap_recv_frame()
			 * takes ownership of the skb, so clear the
			 * connection's rx_skb pointer first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}

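/* Hooks registered with the HCI core so that L2CAP is notified about
 * connection setup, disconnection and security state changes.
 */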
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};

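/* Dump every known L2CAP channel, one per line:
 * src (type) dst (type) state psm scid dcid imtu omtu sec_level mode.
 * With debugfs mounted in the usual place this is readable from
 * /sys/kernel/debug/bluetooth/l2cap.
 */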
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

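/* Module init for the L2CAP layer: register the socket interface,
 * hook into the HCI core and, when the bluetooth debugfs root is
 * available, expose the channel list and the LE flow-control tunables.
 */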
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}

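/* Tear down in reverse order of l2cap_init(): the debugfs entry first,
 * then the HCI callbacks, then the socket interface.
 */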
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}

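/* disable_ertm turns off Enhanced Retransmission Mode; since
 * l2cap_core is built into the bluetooth module, the parameter is
 * typically found under /sys/module/bluetooth/parameters/disable_ertm.
 */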
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");