• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
/* Map an HCI link/address type pair onto the socket-layer BDADDR_*
 * address type.  Anything that is not an LE link is BR/EDR.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77 
/* BDADDR_* address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* BDADDR_* address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Look up a channel on @conn by destination CID.  No reference is
 * taken; the caller must keep the channel list stable.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid == cid)
			return chan;
	}

	return NULL;
}
101 
/* Look up a channel on @conn by source CID.  No reference is taken;
 * the caller must keep the channel list stable.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid == cid)
			return chan;
	}

	return NULL;
}
113 
/* Find channel with given SCID.
 * Returns the channel with a reference held and the channel lock
 * acquired, or NULL if no live channel matches.  The caller must
 * release the lock and drop the reference with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132 
/* Find channel with given DCID.
 * Returns the channel with a reference held and the channel lock
 * acquired, or NULL if no live channel matches.  The caller must
 * release the lock and drop the reference with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151 
/* Look up a channel on @conn by signalling command identifier.  No
 * reference is taken; the caller must keep the channel list stable.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident == ident)
			return chan;
	}

	return NULL;
}
163 
/* Search the global channel list for a channel bound to (@psm, @src).
 * BR/EDR and LE use separate PSM namespaces, so entries on the other
 * transport are skipped.  chan_list_lock must be held.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		bool chan_is_bredr = c->src_type == BDADDR_BREDR;

		/* Only consider channels on the same transport */
		if ((src_type == BDADDR_BREDR) != chan_is_bredr)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
181 
/* Bind @chan to a PSM on local address @src.
 *
 * A non-zero @psm is used verbatim; -EADDRINUSE is returned when some
 * other channel already owns that (psm, src) pair.  A zero @psm asks
 * for auto-allocation from the transport's dynamic range (stepping by
 * 2 on BR/EDR, since valid BR/EDR PSMs are odd in the low octet);
 * -EINVAL is returned when the range is exhausted.  Returns 0 on
 * success with chan->psm and chan->sport set.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Turn @chan into a fixed channel with source CID @scid, overriding the
 * connection-oriented defaults.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the owner via ops->state_change()
 * with no error.  Caller must hold the channel lock.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Move @chan to @state and notify the owner with error @err.  Unlike
 * l2cap_state_change() this emits no debug trace.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report error @err to the channel owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* Arm the ERTM retransmission timer, but only when a timeout is
 * configured and the monitor timer is not already running (the two
 * timers are mutually exclusive per the ERTM state machine).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Switch from the retransmission timer to the monitor timer: the
 * retrans timer is always stopped, and the monitor timer is armed only
 * when a monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Find the skb in @head carrying ERTM tx sequence number @seq, or NULL
 * if no queued frame matches.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *cur;

	skb_queue_walk(head, cur) {
		if (bt_cb(cur)->l2cap.txseq == seq)
			return cur;
	}

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask is alloc_size - 1, valid because alloc_size is a power of 2 */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the backing array of @seq_list (kfree(NULL) is a no-op). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of @seq_list.
 * The caller must ensure the list is non-empty.  When the popped entry
 * was also the tail, the list is reset to the empty state.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last entry: mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
/* Empty @seq_list, clearing every slot.  A no-op when already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386 
/* Append @seq to the tail of @seq_list in constant time.  Each sequence
 * number can appear at most once: a slot that is not CLEAR doubles as
 * the membership flag, so duplicates are silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Work item run when the channel timer expires.  Maps the channel state
 * to a (positive errno) close reason - connection refused while still
 * connecting/configuring, timeout otherwise - and closes the channel.
 * Locks conn->lock before the channel lock, matching the rest of the
 * core.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken by __set_chan_timer() */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel in BT_OPEN state with one
 * reference held, and link it onto the global channel list.  Returns
 * NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC suggests callers may be in atomic
	 * context - confirm before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c.  The caller must already hold
 * one (kref_get() on a zero refcount is a bug); use
 * l2cap_chan_hold_unless_zero() when that is not guaranteed.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 
l2cap_chan_hold_unless_zero(struct l2cap_chan * c)501 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
502 {
503 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504 
505 	if (!kref_get_unless_zero(&c->kref))
506 		return NULL;
507 
508 	return c;
509 }
510 
/* Drop a reference on @c, freeing it via l2cap_chan_destroy() when the
 * last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518 
/* Reset @chan's negotiable parameters (FCS, ERTM window/timer values,
 * security level, flush timeout) to the spec defaults and clear any
 * previous configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Restart configuration from scratch; cleared in l2cap_chan_ready() */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539 
/* Number of LE flow-control credits to grant the remote device, based
 * on the receive buffer space still available (chan->rx_avail) minus
 * the bytes of any partially reassembled SDU.  Returns 0 when MPS is
 * unset or the buffer is already full.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
561 
/* Initialise LE credit-based flow-control state for @chan: reset SDU
 * reassembly, record the peer's initial @tx_credits grant, derive our
 * MPS from the HCI connection MTU and compute the initial rx credit
 * grant.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574 
/* Like l2cap_le_flowctl_init() but enforces the enhanced-credit (ECRED)
 * minimum MPS of 64 octets, recomputing the rx credit grant when the
 * MPS had to be raised.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585 
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type, set best-effort QoS defaults, take a channel reference
 * and append the channel to the connection's list.  Caller must hold
 * conn->lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645 
/* Detach @chan from its connection and tear it down with error @err:
 * stop the channel timer, run ops->teardown(), unlink from the
 * connection list (dropping the reference taken in __l2cap_chan_add())
 * and release mode-specific queued data.  Caller must hold the channel
 * lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific to free before configuration completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703 
/* Invoke @func(chan, @data) on every channel on @conn whose signalling
 * identifier matches @id.  Uses the _safe iterator so @func may remove
 * the channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
714 
/* Invoke @func(chan, @data) on every channel on @conn.  Caller must
 * hold conn->lock; @func must not remove entries from the list.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
724 
/* Invoke @func(chan, @data) on every channel on @conn while holding
 * conn->lock.  A NULL @conn is a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}
/* checkpatch: EXPORT_SYMBOL must immediately follow its function */
EXPORT_SYMBOL_GPL(l2cap_chan_list);
737 
/* Work item that propagates the HCI connection's (possibly resolved)
 * destination address and type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756 
/* Reject a pending incoming LE credit-based connection on @chan,
 * replying with AUTHORIZATION when setup was deferred (user declined)
 * and BAD_PSM otherwise, then moving the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779 
/* Reject a pending incoming ECRED connection: move @chan to BT_DISCONN
 * and let the deferred response path emit the reply.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786 
l2cap_chan_connect_reject(struct l2cap_chan * chan)787 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
788 {
789 	struct l2cap_conn *conn = chan->conn;
790 	struct l2cap_conn_rsp rsp;
791 	u16 result;
792 
793 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
794 		result = L2CAP_CR_SEC_BLOCK;
795 	else
796 		result = L2CAP_CR_BAD_PSM;
797 
798 	l2cap_state_change(chan, BT_DISCONN);
799 
800 	rsp.scid   = cpu_to_le16(chan->dcid);
801 	rsp.dcid   = cpu_to_le16(chan->scid);
802 	rsp.result = cpu_to_le16(result);
803 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804 
805 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
806 }
807 
/* Close @chan for @reason, taking the action appropriate to its state:
 * send a disconnect request (connected conn-oriented channels), reject
 * a still-pending incoming connection (BT_CONNECT2), or simply tear
 * the channel down.  Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers teardown */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858 
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and security level.  Side effect: SDP-style channels with
 * BT_SECURITY_LOW are raised to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910 
/* Service level security */
/* Request the link security required by @chan: SMP pairing on LE,
 * HCI authentication/encryption on BR/EDR.  Return value semantics
 * follow smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925 
/* Allocate the next signalling command identifier for @conn, cycling
 * through 1-128 under conn->ident_lock (0 is invalid on the wire).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947 
/* Send @skb on @conn's ACL channel, or free it when the underlying HCI
 * connection has already gone away (avoids queueing onto a dead hcon).
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957 
/* Build and transmit an L2CAP signalling command (@code, @ident) with
 * @len bytes of payload from @data.  Signalling frames are sent at
 * maximum HCI priority; build failures are silently dropped.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982 
/* Transmit a data frame for @chan over its HCI connection, choosing
 * the ACL flush flags from the link type and the channel's
 * FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005 
/* Decode a 16-bit enhanced ERTM control field @enh into @control. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	bool sframe = enh & L2CAP_CTRL_FRAME_TYPE;

	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = sframe;

	if (sframe) {
		/* S-Frame: supervisory fields only */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and tx sequence fields */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1029 
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	/* Fields present in both frame types */
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-frame: P-bit and supervisory function */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: segmentation bits and transmit sequence number */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1053 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1054 static inline void __unpack_control(struct l2cap_chan *chan,
1055 				    struct sk_buff *skb)
1056 {
1057 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1058 		__unpack_extended_control(get_unaligned_le32(skb->data),
1059 					  &bt_cb(skb)->l2cap);
1060 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1061 	} else {
1062 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1063 					  &bt_cb(skb)->l2cap);
1064 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1065 	}
1066 }
1067 
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 	u32 packed;
1071 
1072 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074 
1075 	if (control->sframe) {
1076 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 	} else {
1080 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 	}
1083 
1084 	return packed;
1085 }
1086 
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 	u16 packed;
1090 
1091 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093 
1094 	if (control->sframe) {
1095 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 		packed |= L2CAP_CTRL_FRAME_TYPE;
1098 	} else {
1099 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 	}
1102 
1103 	return packed;
1104 }
1105 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 				  struct l2cap_ctrl *control,
1108 				  struct sk_buff *skb)
1109 {
1110 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 		put_unaligned_le32(__pack_extended_control(control),
1112 				   skb->data + L2CAP_HDR_SIZE);
1113 	} else {
1114 		put_unaligned_le16(__pack_enhanced_control(control),
1115 				   skb->data + L2CAP_HDR_SIZE);
1116 	}
1117 }
1118 
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 		return L2CAP_EXT_HDR_SIZE;
1123 	else
1124 		return L2CAP_ENH_HDR_SIZE;
1125 }
1126 
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int hlen = __ertm_hdr_size(chan);

	/* Make room for the trailing FCS when the channel uses one */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: remaining length and destination CID */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field in the negotiated (enhanced/extended) format */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);

		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159 
/* Build and transmit one supervisory (S) frame described by @control.
 * As a side effect this updates the channel's F-bit, RNR and ack-timer
 * bookkeeping, so @control may be modified.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer has been told we are locally busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any S-frame other than SREJ acknowledges frames up to reqseq,
	 * so the pending ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197 
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll = poll,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	/* Report receiver-not-ready while locally busy, otherwise RR */
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1216 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 		return true;
1221 
1222 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224 
l2cap_send_conn_req(struct l2cap_chan * chan)1225 void l2cap_send_conn_req(struct l2cap_chan *chan)
1226 {
1227 	struct l2cap_conn *conn = chan->conn;
1228 	struct l2cap_conn_req req;
1229 
1230 	req.scid = cpu_to_le16(chan->scid);
1231 	req.psm  = chan->psm;
1232 
1233 	chan->ident = l2cap_get_ident(conn);
1234 
1235 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1236 
1237 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1238 }
1239 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready().
 * Safe to call more than once; repeat calls are no-ops.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without credits from
		 * the peer; suspend the channel when starting with none.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265 
l2cap_le_connect(struct l2cap_chan * chan)1266 static void l2cap_le_connect(struct l2cap_chan *chan)
1267 {
1268 	struct l2cap_conn *conn = chan->conn;
1269 	struct l2cap_le_conn_req req;
1270 
1271 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1272 		return;
1273 
1274 	if (!chan->imtu)
1275 		chan->imtu = chan->conn->mtu;
1276 
1277 	l2cap_le_flowctl_init(chan, 0);
1278 
1279 	memset(&req, 0, sizeof(req));
1280 	req.psm     = chan->psm;
1281 	req.scid    = cpu_to_le16(chan->scid);
1282 	req.mtu     = cpu_to_le16(chan->imtu);
1283 	req.mps     = cpu_to_le16(chan->mps);
1284 	req.credits = cpu_to_le16(chan->rx_credits);
1285 
1286 	chan->ident = l2cap_get_ident(conn);
1287 
1288 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1289 		       sizeof(req), &req);
1290 }
1291 
/* Scratch state used to build a single ECRED connection request that
 * covers the initiating channel plus matching deferred channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* source CIDs carried by the request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;	/* peer PID; only same-PID channels are added */
	int count;		/* number of scid[] entries filled in */
};
1301 
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1302 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1303 {
1304 	struct l2cap_ecred_conn_data *conn = data;
1305 	struct pid *pid;
1306 
1307 	if (chan == conn->chan)
1308 		return;
1309 
1310 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 		return;
1312 
1313 	pid = chan->ops->get_peer_pid(chan);
1314 
1315 	/* Only add deferred channels with the same PID/PSM */
1316 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1317 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1318 		return;
1319 
1320 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1321 		return;
1322 
1323 	l2cap_ecred_init(chan, 0);
1324 
1325 	/* Set the same ident so we can match on the rsp */
1326 	chan->ident = conn->chan->ident;
1327 
1328 	/* Include all channels deferred */
1329 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1330 
1331 	conn->count++;
1332 }
1333 
/* Send an ECRED connection request for @chan, batching in any deferred
 * channels that share its PID/PSM so one request covers several CIDs.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by
	 * l2cap_ecred_defer_connect() on behalf of a sibling channel.
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Send the connect request at most once per channel */
	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Append deferred channels with the same PID/PSM to the PDU */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366 
l2cap_le_start(struct l2cap_chan * chan)1367 static void l2cap_le_start(struct l2cap_chan *chan)
1368 {
1369 	struct l2cap_conn *conn = chan->conn;
1370 
1371 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1372 		return;
1373 
1374 	if (!chan->psm) {
1375 		l2cap_chan_ready(chan);
1376 		return;
1377 	}
1378 
1379 	if (chan->state == BT_CONNECT) {
1380 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1381 			l2cap_ecred_connect(chan);
1382 		else
1383 			l2cap_le_connect(chan);
1384 	}
1385 }
1386 
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 	if (chan->conn->hcon->type == LE_LINK) {
1390 		l2cap_le_start(chan);
1391 	} else {
1392 		l2cap_send_conn_req(chan);
1393 	}
1394 }
1395 
l2cap_request_info(struct l2cap_conn * conn)1396 static void l2cap_request_info(struct l2cap_conn *conn)
1397 {
1398 	struct l2cap_info_req req;
1399 
1400 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1401 		return;
1402 
1403 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1404 
1405 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1406 	conn->info_ident = l2cap_get_ident(conn);
1407 
1408 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1409 
1410 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1411 		       sizeof(req), &req);
1412 }
1413 
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1415 				     struct l2cap_chan *chan)
1416 {
1417 	/* The minimum encryption key size needs to be enforced by the
1418 	 * host stack before establishing any L2CAP connections. The
1419 	 * specification in theory allows a minimum of 1, but to align
1420 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1421 	 *
1422 	 * This check might also be called for unencrypted connections
1423 	 * that have no key size requirements. Ensure that the link is
1424 	 * actually encrypted before enforcing a key size.
1425 	 */
1426 	int min_key_size = hcon->hdev->min_enc_key_size;
1427 
1428 	/* On FIPS security level, key size must be 16 bytes */
1429 	if (chan->sec_level == BT_SECURITY_FIPS)
1430 		min_key_size = 16;
1431 
1432 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1433 		hcon->enc_key_size >= min_key_size);
1434 }
1435 
/* Drive @chan towards the connected state, honouring the per-connection
 * prerequisites (feature-mask exchange, security, encryption strength).
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* BR/EDR needs the remote feature mask before connecting; kick
	 * off the info request and let l2cap_conn_start() resume us once
	 * the response (or its timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Refuse to connect over links with too-small encryption keys;
	 * arm the disconnect timer instead.
	 */
	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1462 
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	u32 required;

	/* ERTM and streaming are only offered locally when not disabled */
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		required = L2CAP_FEAT_ERTM;
		break;
	case L2CAP_MODE_STREAMING:
		required = L2CAP_FEAT_STREAMING;
		break;
	default:
		return 0x00;
	}

	/* The mode must be supported by both sides */
	return required & feat_mask & local_feat_mask;
}
1478 
l2cap_send_disconn_req(struct l2cap_chan * chan,int err)1479 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1480 {
1481 	struct l2cap_conn *conn = chan->conn;
1482 	struct l2cap_disconn_req req;
1483 
1484 	if (!conn)
1485 		return;
1486 
1487 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1488 		__clear_retrans_timer(chan);
1489 		__clear_monitor_timer(chan);
1490 		__clear_ack_timer(chan);
1491 	}
1492 
1493 	req.dcid = cpu_to_le16(chan->dcid);
1494 	req.scid = cpu_to_le16(chan->scid);
1495 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1496 		       sizeof(req), &req);
1497 
1498 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1499 }
1500 
1501 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and move it forward now that the
 * feature-mask exchange has completed (or timed out). Each channel is
 * handled under its own lock.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close if the requested mode is unsupported and the
			 * channel is flagged as a CONF_STATE2_DEVICE, i.e.
			 * cannot fall back to another mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Never connect over a weakly encrypted link */
			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection waiting on our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner decide; answer PEND */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security still pending */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only enter configuration on success, and only if a
			 * config request is not already outstanding.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1577 
l2cap_le_conn_ready(struct l2cap_conn * conn)1578 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1579 {
1580 	struct hci_conn *hcon = conn->hcon;
1581 	struct hci_dev *hdev = hcon->hdev;
1582 
1583 	BT_DBG("%s conn %p", hdev->name, conn);
1584 
1585 	/* For outgoing pairing which doesn't necessarily have an
1586 	 * associated socket (e.g. mgmt_pair_device).
1587 	 */
1588 	if (hcon->out)
1589 		smp_conn_security(hcon, hcon->pending_sec_level);
1590 
1591 	/* For LE peripheral connections, make sure the connection interval
1592 	 * is in the range of the minimum and maximum interval that has
1593 	 * been configured for this connection. If not, then trigger
1594 	 * the connection update procedure.
1595 	 */
1596 	if (hcon->role == HCI_ROLE_SLAVE &&
1597 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1598 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1599 		struct l2cap_conn_param_update_req req;
1600 
1601 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1602 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1603 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1604 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1605 
1606 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1607 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1608 	}
1609 }
1610 
/* Called once the underlying link is usable: start every channel on
 * @conn and release any RX data queued while the link was coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR links need the remote feature mask first */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Non-connection-oriented channels only wait for
			 * the feature exchange to be done.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1646 
1647 /* Notify sockets that we cannot guaranty reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1648 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1649 {
1650 	struct l2cap_chan *chan;
1651 
1652 	BT_DBG("conn %p", conn);
1653 
1654 	list_for_each_entry(chan, &conn->chan_l, list) {
1655 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1656 			l2cap_chan_set_err(chan, err);
1657 	}
1658 }
1659 
/* Info request timed out: mark the feature exchange as done anyway so
 * that pending channels are not stalled forever, then resume them.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1672 
1673 /*
1674  * l2cap_user
1675  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1676  * callback is called during registration. The ->remove callback is called
1677  * during unregistration.
1678  * An l2cap_user object can either be explicitly unregistered or when the
1679  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1680  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1681  * External modules must own a reference to the l2cap_conn object if they intend
1682  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1683  * any time if they don't.
1684  */
1685 
/* Register @user on @conn. Returns 0 on success, -EINVAL if the user is
 * already registered, -ENODEV if the connection is being torn down, or
 * whatever the user's probe callback returns.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1723 
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1724 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1725 {
1726 	struct hci_dev *hdev = conn->hcon->hdev;
1727 
1728 	hci_dev_lock(hdev);
1729 
1730 	if (list_empty(&user->list))
1731 		goto out_unlock;
1732 
1733 	list_del_init(&user->list);
1734 	user->remove(conn, user);
1735 
1736 out_unlock:
1737 	hci_dev_unlock(hdev);
1738 }
1739 EXPORT_SYMBOL(l2cap_unregister_user);
1740 
l2cap_unregister_all_users(struct l2cap_conn * conn)1741 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1742 {
1743 	struct l2cap_user *user;
1744 
1745 	while (!list_empty(&conn->users)) {
1746 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1747 		list_del_init(&user->list);
1748 		user->remove(conn, user);
1749 	}
1750 }
1751 
/* Tear down the L2CAP state attached to @hcon: stop pending work,
 * close every channel with @err, and detach from the hci_conn. Drops
 * the l2cap_conn reference at the end.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Keep a reference across del/close, which may otherwise
		 * release the channel.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* hchan == NULL signals "deleted" to l2cap_register_user() */
	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1805 
/* kref release callback: free @conn and drop its hci_conn reference */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1813 
/* Take an additional reference on @conn; returns @conn for chaining */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1820 
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free() */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1826 
1827 /* ---- Socket interface ---- */
1828 
1829 /* Find socket with psm and source / destination bdaddr.
1830  * Returns closest match.
1831  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* The channel's address type must fit the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels whose refcount already hit
				 * zero (being freed).
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Take the reference on the fallback candidate before dropping
	 * the list lock; may still fail if it is being freed.
	 */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1882 
/* ERTM monitor timer expiry: feed a MONITOR_TO event into the channel's
 * tx state machine, unless the channel has already been detached.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* The connection may have gone away while this work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);	/* release channel reference */
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1903 
/* ERTM retransmission timer expiry: feed a RETRANS_TO event into the
 * channel's tx state machine, unless the channel has been detached.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* The connection may have gone away while this work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);	/* release channel reference */
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1923 
/* Streaming mode: append @skbs to the tx queue and transmit everything
 * immediately, stamping each frame with the next tx sequence number.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames carry no acknowledgement sequence */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append FCS over header, control field and payload */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1959 
/* Transmit queued I-frames on an ERTM channel, limited by the remote's
 * tx window. Returns the number of frames sent, 0 when transmission is
 * blocked, or -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled receiver-not-ready; hold transmission */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance to the next unsent frame; the original skb stays
		 * in tx_q for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2026 
/* Retransmit the I-frames whose sequence numbers are queued on
 * retrans_list.
 *
 * Each frame is looked up in tx_q by sequence number; its retry count
 * is bumped and checked against max_tx (0 means unlimited — exceeding
 * the limit disconnects the channel).  The stored control field is
 * refreshed with the current reqseq and F-bit, the FCS recomputed in
 * place, and the frame sent.  A writeable copy is made when the queued
 * skb is cloned, since clone data is treated as read-only.
 *
 * Does nothing while the remote is busy (RNR received).
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means retry without limit */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh ack info and F-bit in the local control copy */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2104 
l2cap_retransmit(struct l2cap_chan * chan,struct l2cap_ctrl * control)2105 static void l2cap_retransmit(struct l2cap_chan *chan,
2106 			     struct l2cap_ctrl *control)
2107 {
2108 	BT_DBG("chan %p, control %p", chan, control);
2109 
2110 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2111 	l2cap_ertm_resend(chan);
2112 }
2113 
/* Queue every already-sent, unacked I-frame from reqseq up to (but not
 * including) tx_send_head for retransmission, then resend them.
 *
 * A poll (P-bit) in the triggering frame obligates us to set the F-bit
 * on the first response.  The retrans list is rebuilt from scratch;
 * nothing is resent while the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the frame with txseq == reqseq, stopping early if we
		 * reach the first never-sent frame (tx_send_head).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue all already-sent frames from there onward */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2147 
/* Acknowledge received I-frames, choosing the cheapest mechanism.
 *
 * If the channel is locally busy, an RNR is sent immediately.
 * Otherwise pending I-frames are transmitted first, since each one
 * carries an ack for free.  Any remaining un-acked frames trigger an
 * explicit RR once 3/4 of the ack window is consumed; below that
 * threshold the ack timer is (re)armed so acks can be batched.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Frames received but not yet acknowledged to the remote */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = ack_win * 3 */
		threshold >>= 2;		/* threshold = ack_win * 3 / 4 */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2197 
/* Copy up to @len bytes of user data from @msg into @skb, spilling
 * anything beyond the first @count bytes into continuation fragments
 * chained on skb's frag_list.
 *
 * Each continuation fragment holds at most conn->mtu bytes and carries
 * no L2CAP header.  Note that a fragment is linked into the chain
 * *before* its data is copied, so on -EFAULT the caller's
 * kfree_skb(skb) frees the partially-built chain as well.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so error paths free the fragment */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment data in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2241 
/* Build a single connectionless (G-frame) PDU: L2CAP header, 2-byte
 * PSM, then user data (fragmented via frag_list if it exceeds the HCI
 * MTU).  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment carries the header; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2273 
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by the whole
 * SDU (fragmented via frag_list if it exceeds the HCI MTU).  Returns
 * the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* First fragment carries the header; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2303 
/* Build one ERTM/streaming I-frame PDU.
 *
 * Reserves room for the enhanced or extended control field (zeroed
 * here, filled in at transmit time), an optional 2-byte SDU-length
 * field for the first fragment of a segmented SDU (@sdulen != 0), and
 * an FCS trailer when CRC16 is in use.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control mode */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2357 
/* Segment an SDU into ERTM/streaming I-frame PDUs and append them to
 * @seg_queue.
 *
 * The PDU size is bounded by the HCI MTU (so each PDU fits one HCI
 * fragment), the BR/EDR payload limit, ERTM header/FCS overhead, and
 * the remote's MPS.  Each PDU is tagged with the appropriate SAR
 * value; only the START fragment carries the SDU length.
 *
 * Returns 0 on success; on failure the partially-built queue is
 * purged and a negative errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START fragment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2423 
/* Build one LE flow-control (K-frame) PDU: L2CAP header, optional
 * 2-byte SDU-length field (first fragment only, @sdulen != 0), then
 * user data.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2466 
/* Segment an SDU into LE flow-control (K-frame) PDUs of at most
 * remote_mps bytes each and append them to @seg_queue.  Only the
 * first PDU carries the 2-byte SDU-length field; later PDUs reclaim
 * that space for payload.
 *
 * Returns 0 on success; on failure the partially-built queue is
 * purged and a negative errno returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU payload is reduced by the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, drop the SDU length field and let
		 * the payload use its two bytes.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2502 
l2cap_le_flowctl_send(struct l2cap_chan * chan)2503 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2504 {
2505 	int sent = 0;
2506 
2507 	BT_DBG("chan %p", chan);
2508 
2509 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2510 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2511 		chan->tx_credits--;
2512 		sent++;
2513 	}
2514 
2515 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2516 	       skb_queue_len(&chan->tx_q));
2517 }
2518 
/* Send an SDU on a channel, dispatching on channel type and mode.
 *
 * Connectionless channels send a single G-frame.  LE/extended
 * flow-control modes segment into K-frames, queue them, and transmit
 * as credits allow, suspending the channel when credits run out.
 * Basic mode sends one unsegmented B-frame.  ERTM/streaming modes
 * segment first (segmentation may block on allocation) and then hand
 * the frames to the TX state machine or stream them directly.
 *
 * Returns the number of bytes sent (== len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: block further sends until replenished */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2622 
/* Frames between expected_tx_seq and @txseq were missed: send an SREJ
 * S-frame for each missing sequence number not already buffered in
 * srej_q, remembering each request in srej_list.  Finally advance
 * expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2645 
l2cap_send_srej_tail(struct l2cap_chan * chan)2646 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2647 {
2648 	struct l2cap_ctrl control;
2649 
2650 	BT_DBG("chan %p", chan);
2651 
2652 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2653 		return;
2654 
2655 	memset(&control, 0, sizeof(control));
2656 	control.sframe = 1;
2657 	control.super = L2CAP_SUPER_SREJ;
2658 	control.reqseq = chan->srej_list.tail;
2659 	l2cap_send_sframe(chan, &control);
2660 }
2661 
/* Re-send SREJ frames for every sequence number still outstanding on
 * srej_list, except @txseq (which has just arrived).  Popping and
 * re-appending entries rotates the list; initial_head bounds the
 * traversal to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop on the just-received seq or an empty list */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2687 
/* Process an acknowledgment (reqseq) from the remote: free every
 * acked frame on tx_q between expected_ack_seq and @reqseq, and stop
 * the retransmission timer once nothing remains outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no retransmissions pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2719 
/* Abandon SREJ-based recovery: rewind the expected sequence number,
 * forget outstanding SREJ requests, drop buffered out-of-order frames
 * and return the receiver to the normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2729 
/* ERTM transmitter state machine, XMIT state.
 *
 * New data is queued and sent immediately.  Local-busy transitions
 * send RNR/ack as appropriate.  An explicit poll or a retransmission
 * timeout sends an S-frame with the P-bit and moves to WAIT_F until
 * the remote responds with the F-bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* RNR was advertised: poll the remote (RR + P-bit)
			 * and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the remote */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2801 
/* ERTM transmitter state machine, WAIT_F state.
 *
 * Entered after sending a P-bit poll (explicit poll, retransmission
 * timeout, or local-busy clear).  Data requests are queued but not
 * sent; transmission resumes (back to XMIT) only when a frame arrives
 * with the F-bit set.  The monitor timer re-polls on timeout, up to
 * max_tx attempts (0 = unlimited) before the channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* RNR was advertised: poll the remote (RR + P-bit)
			 * and keep waiting for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Format fixed: was "0x2.2%x", a malformed
			 * conversion that printed a literal "0x2.2".
			 */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2878 
/* Dispatch a transmitter event to the handler for the current ERTM
 * TX state.  Events arriving in any other state are ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Unknown state: drop the event */
}
2897 
l2cap_pass_to_tx(struct l2cap_chan * chan,struct l2cap_ctrl * control)2898 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2899 			     struct l2cap_ctrl *control)
2900 {
2901 	BT_DBG("chan %p, control %p", chan, control);
2902 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2903 }
2904 
l2cap_pass_to_tx_fbit(struct l2cap_chan * chan,struct l2cap_ctrl * control)2905 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2906 				  struct l2cap_ctrl *control)
2907 {
2908 	BT_DBG("chan %p, control %p", chan, control);
2909 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2910 }
2911 
2912 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2913 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2914 {
2915 	struct sk_buff *nskb;
2916 	struct l2cap_chan *chan;
2917 
2918 	BT_DBG("conn %p", conn);
2919 
2920 	list_for_each_entry(chan, &conn->chan_l, list) {
2921 		if (chan->chan_type != L2CAP_CHAN_RAW)
2922 			continue;
2923 
2924 		/* Don't send frame to the channel it came from */
2925 		if (bt_cb(skb)->l2cap.chan == chan)
2926 			continue;
2927 
2928 		nskb = skb_clone(skb, GFP_KERNEL);
2929 		if (!nskb)
2930 			continue;
2931 		if (chan->ops->recv(chan, nskb))
2932 			kfree_skb(nskb);
2933 	}
2934 }
2935 
2936 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb: L2CAP header (signalling
 * CID chosen by link type), command header, then @dlen bytes of @data.
 * Payload beyond the first MTU-sized chunk goes into continuation
 * fragments on frag_list.  Returns NULL on allocation failure or when
 * the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including already-built fragments */
	kfree_skb(skb);
	return NULL;
}
3002 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 *
 * Stores the option type and length, and returns the total encoded
 * length (header + value).  1/2/4-byte values are fetched by value;
 * any other length leaves a pointer to the raw option bytes in *val.
 *
 * NOTE(review): opt->len comes from the peer and is not bounds-checked
 * here; callers must ensure the remaining buffer actually contains
 * L2CAP_CONF_OPT_SIZE + opt->len bytes — verify at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3036 
/* Append one configuration option at *ptr, advancing *ptr past it.
 * Silently drops the option if it would not fit in the remaining
 * @size bytes.  For lengths other than 1/2/4, @val is treated as a
 * pointer to the raw option payload.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Guard against overflowing the response buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3069 
/* Append an Extended Flow Specification option describing this
 * channel.  ERTM channels advertise their local service parameters;
 * streaming channels use best-effort defaults.  Other modes get no
 * EFS option at all.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3100 
/* Delayed-work handler for the ERTM acknowledgment timer.  If received
 * I-frames are still unacknowledged (buffer_seq has advanced past
 * last_acked_seq) an RR/RNR S-frame is sent to acknowledge them.
 * Drops the channel reference via l2cap_chan_put() before returning
 * (presumably the reference held for the pending timer — confirm at the
 * site that arms the timer, outside this view).
 */
l2cap_ack_timeout(struct work_struct * work)3101 static void l2cap_ack_timeout(struct work_struct *work)
3102 {
3103 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3104 					       ack_timer.work);
3105 	u16 frames_to_ack;
3106 
3107 	BT_DBG("chan %p", chan);
3108 
3109 	l2cap_chan_lock(chan);
3110 
	/* Distance from last acknowledged sequence to current buffer_seq */
3111 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3112 				     chan->last_acked_seq);
3113 
3114 	if (frames_to_ack)
3115 		l2cap_send_rr_or_rnr(chan, 0);
3116 
3117 	l2cap_chan_unlock(chan);
3118 	l2cap_chan_put(chan);
3119 }
3120 
/* Reset a channel's transmit/reassembly state ahead of data transfer.
 * All sequence counters, SDU reassembly pointers and the tx queue are
 * (re)initialised for every mode; for non-ERTM modes that is all.
 * In ERTM mode the rx/tx state machines are also reset and the SREJ and
 * retransmission sequence lists are allocated; if the second allocation
 * fails the first is freed again so no list leaks.
 *
 * Returns 0 on success or a negative errno from l2cap_seq_list_init().
 */
l2cap_ertm_init(struct l2cap_chan * chan)3121 int l2cap_ertm_init(struct l2cap_chan *chan)
3122 {
3123 	int err;
3124 
3125 	chan->next_tx_seq = 0;
3126 	chan->expected_tx_seq = 0;
3127 	chan->expected_ack_seq = 0;
3128 	chan->unacked_frames = 0;
3129 	chan->buffer_seq = 0;
3130 	chan->frames_sent = 0;
3131 	chan->last_acked_seq = 0;
3132 	chan->sdu = NULL;
3133 	chan->sdu_last_frag = NULL;
3134 	chan->sdu_len = 0;
3135 
3136 	skb_queue_head_init(&chan->tx_q);
3137 
	/* Streaming/basic modes need none of the ERTM machinery below */
3138 	if (chan->mode != L2CAP_MODE_ERTM)
3139 		return 0;
3140 
3141 	chan->rx_state = L2CAP_RX_STATE_RECV;
3142 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3143 
3144 	skb_queue_head_init(&chan->srej_q);
3145 
3146 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3147 	if (err < 0)
3148 		return err;
3149 
3150 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3151 	if (err < 0)
	/* Unwind the srej_list allocation on failure */
3152 		l2cap_seq_list_free(&chan->srej_list);
3153 
3154 	return err;
3155 }
3156 
/* Pick the channel mode to use: keep ERTM or streaming if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if (mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) {
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
	}

	return L2CAP_MODE_BASIC;
}
3169 
__l2cap_ews_supported(struct l2cap_conn * conn)3170 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3171 {
3172 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3173 }
3174 
__l2cap_efs_supported(struct l2cap_conn * conn)3175 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3176 {
3177 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3178 }
3179 
/* Fill in the RFC option's retransmission and monitor timeout fields
 * with the stack's default values (little-endian on the wire).  The
 * 'chan' argument is currently unused here but kept for the callers'
 * uniform calling convention.
 */
__l2cap_set_ertm_timeouts(struct l2cap_chan * chan,struct l2cap_conf_rfc * rfc)3180 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3181 				      struct l2cap_conf_rfc *rfc)
3182 {
3183 	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3184 	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3185 }
3186 
l2cap_txwin_setup(struct l2cap_chan * chan)3187 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3188 {
3189 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3190 	    __l2cap_ews_supported(chan->conn)) {
3191 		/* use extended control field */
3192 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3193 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3194 	} else {
3195 		chan->tx_win = min_t(u16, chan->tx_win,
3196 				     L2CAP_DEFAULT_TX_WINDOW);
3197 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3198 	}
3199 	chan->ack_win = chan->tx_win;
3200 }
3201 
l2cap_mtu_auto(struct l2cap_chan * chan)3202 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3203 {
3204 	struct hci_conn *conn = chan->conn->hcon;
3205 
3206 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3207 
3208 	/* The 2-DH1 packet has between 2 and 56 information bytes
3209 	 * (including the 2-byte payload header)
3210 	 */
3211 	if (!(conn->pkt_type & HCI_2DH1))
3212 		chan->imtu = 54;
3213 
3214 	/* The 3-DH1 packet has between 2 and 85 information bytes
3215 	 * (including the 2-byte payload header)
3216 	 */
3217 	if (!(conn->pkt_type & HCI_3DH1))
3218 		chan->imtu = 83;
3219 
3220 	/* The 2-DH3 packet has between 2 and 369 information bytes
3221 	 * (including the 2-byte payload header)
3222 	 */
3223 	if (!(conn->pkt_type & HCI_2DH3))
3224 		chan->imtu = 367;
3225 
3226 	/* The 3-DH3 packet has between 2 and 554 information bytes
3227 	 * (including the 2-byte payload header)
3228 	 */
3229 	if (!(conn->pkt_type & HCI_3DH3))
3230 		chan->imtu = 552;
3231 
3232 	/* The 2-DH5 packet has between 2 and 681 information bytes
3233 	 * (including the 2-byte payload header)
3234 	 */
3235 	if (!(conn->pkt_type & HCI_2DH5))
3236 		chan->imtu = 679;
3237 
3238 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3239 	 * (including the 2-byte payload header)
3240 	 */
3241 	if (!(conn->pkt_type & HCI_3DH5))
3242 		chan->imtu = 1021;
3243 }
3244 
/* Build an L2CAP_CONF_REQ PDU for 'chan' into 'data' (at most
 * 'data_size' bytes) and return the number of bytes written.
 * On the first request the channel mode is finalised against the
 * remote's feature mask; then MTU, RFC, EFS, EWS and FCS options are
 * appended as applicable for the chosen mode.  Every option append is
 * bounded by 'endptr - ptr' so the buffer cannot be overrun.
 */
l2cap_build_conf_req(struct l2cap_chan * chan,void * data,size_t data_size)3245 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3246 {
3247 	struct l2cap_conf_req *req = data;
3248 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3249 	void *ptr = req->data;
3250 	void *endptr = data + data_size;
3251 	u16 size;
3252 
3253 	BT_DBG("chan %p", chan);
3254 
	/* Mode selection happens only before any request/response exchange */
3255 	if (chan->num_conf_req || chan->num_conf_rsp)
3256 		goto done;
3257 
3258 	switch (chan->mode) {
3259 	case L2CAP_MODE_STREAMING:
3260 	case L2CAP_MODE_ERTM:
3261 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3262 			break;
3263 
3264 		if (__l2cap_efs_supported(chan->conn))
3265 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3266 
3267 		fallthrough;
3268 	default:
	/* Fall back to a mode the remote actually supports */
3269 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3270 		break;
3271 	}
3272 
3273 done:
3274 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
	/* imtu == 0 means "derive from the link's packet types" */
3275 		if (!chan->imtu)
3276 			l2cap_mtu_auto(chan);
3277 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3278 				   endptr - ptr);
3279 	}
3280 
3281 	switch (chan->mode) {
3282 	case L2CAP_MODE_BASIC:
3283 		if (disable_ertm)
3284 			break;
3285 
	/* Only send an explicit basic-mode RFC if the remote knows
	 * about ERTM or streaming at all
	 */
3286 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3287 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3288 			break;
3289 
3290 		rfc.mode            = L2CAP_MODE_BASIC;
3291 		rfc.txwin_size      = 0;
3292 		rfc.max_transmit    = 0;
3293 		rfc.retrans_timeout = 0;
3294 		rfc.monitor_timeout = 0;
3295 		rfc.max_pdu_size    = 0;
3296 
3297 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3298 				   (unsigned long) &rfc, endptr - ptr);
3299 		break;
3300 
3301 	case L2CAP_MODE_ERTM:
3302 		rfc.mode            = L2CAP_MODE_ERTM;
3303 		rfc.max_transmit    = chan->max_tx;
3304 
3305 		__l2cap_set_ertm_timeouts(chan, &rfc);
3306 
	/* Cap the PDU size so a full frame with headers and FCS
	 * still fits into the connection MTU
	 */
3307 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3308 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3309 			     L2CAP_FCS_SIZE);
3310 		rfc.max_pdu_size = cpu_to_le16(size);
3311 
3312 		l2cap_txwin_setup(chan);
3313 
3314 		rfc.txwin_size = min_t(u16, chan->tx_win,
3315 				       L2CAP_DEFAULT_TX_WINDOW);
3316 
3317 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3318 				   (unsigned long) &rfc, endptr - ptr);
3319 
3320 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3321 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3322 
	/* Extended window sizes travel in a separate EWS option */
3323 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3324 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3325 					   chan->tx_win, endptr - ptr);
3326 
3327 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3328 			if (chan->fcs == L2CAP_FCS_NONE ||
3329 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3330 				chan->fcs = L2CAP_FCS_NONE;
3331 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3332 						   chan->fcs, endptr - ptr);
3333 			}
3334 		break;
3335 
3336 	case L2CAP_MODE_STREAMING:
3337 		l2cap_txwin_setup(chan);
3338 		rfc.mode            = L2CAP_MODE_STREAMING;
3339 		rfc.txwin_size      = 0;
3340 		rfc.max_transmit    = 0;
3341 		rfc.retrans_timeout = 0;
3342 		rfc.monitor_timeout = 0;
3343 
3344 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3345 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3346 			     L2CAP_FCS_SIZE);
3347 		rfc.max_pdu_size = cpu_to_le16(size);
3348 
3349 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3350 				   (unsigned long) &rfc, endptr - ptr);
3351 
3352 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3353 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3354 
3355 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3356 			if (chan->fcs == L2CAP_FCS_NONE ||
3357 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3358 				chan->fcs = L2CAP_FCS_NONE;
3359 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3360 						   chan->fcs, endptr - ptr);
3361 			}
3362 		break;
3363 	}
3364 
3365 	req->dcid  = cpu_to_le16(chan->dcid);
3366 	req->flags = cpu_to_le16(0);
3367 
3368 	return ptr - data;
3369 }
3370 
/* Parse the configuration request stored in chan->conf_req/conf_len and
 * build the matching L2CAP_CONF_RSP into 'data' (at most 'data_size'
 * bytes).  Unknown non-hint options are echoed back with result UNKNOWN;
 * incompatible mode, EFS or EWS requests return -ECONNREFUSED so the
 * caller can tear the channel down.  Returns the response length on
 * success or a negative errno.
 */
l2cap_parse_conf_req(struct l2cap_chan * chan,void * data,size_t data_size)3371 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3372 {
3373 	struct l2cap_conf_rsp *rsp = data;
3374 	void *ptr = rsp->data;
3375 	void *endptr = data + data_size;
3376 	void *req = chan->conf_req;
3377 	int len = chan->conf_len;
3378 	int type, hint, olen;
3379 	unsigned long val;
3380 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3381 	struct l2cap_conf_efs efs;
3382 	u8 remote_efs = 0;
3383 	u16 mtu = 0;
3384 	u16 result = L2CAP_CONF_SUCCESS;
3385 	u16 size;
3386 
3387 	BT_DBG("chan %p", chan);
3388 
	/* First pass: decode every option in the request.  Options with
	 * an unexpected length are silently skipped.
	 */
3389 	while (len >= L2CAP_CONF_OPT_SIZE) {
3390 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3391 		if (len < 0)
3392 			break;
3393 
3394 		hint  = type & L2CAP_CONF_HINT;
3395 		type &= L2CAP_CONF_MASK;
3396 
3397 		switch (type) {
3398 		case L2CAP_CONF_MTU:
3399 			if (olen != 2)
3400 				break;
3401 			mtu = val;
3402 			break;
3403 
3404 		case L2CAP_CONF_FLUSH_TO:
3405 			if (olen != 2)
3406 				break;
3407 			chan->flush_to = val;
3408 			break;
3409 
3410 		case L2CAP_CONF_QOS:
3411 			break;
3412 
3413 		case L2CAP_CONF_RFC:
3414 			if (olen != sizeof(rfc))
3415 				break;
3416 			memcpy(&rfc, (void *) val, olen);
3417 			break;
3418 
3419 		case L2CAP_CONF_FCS:
3420 			if (olen != 1)
3421 				break;
3422 			if (val == L2CAP_FCS_NONE)
3423 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3424 			break;
3425 
3426 		case L2CAP_CONF_EFS:
3427 			if (olen != sizeof(efs))
3428 				break;
3429 			remote_efs = 1;
3430 			memcpy(&efs, (void *) val, olen);
3431 			break;
3432 
3433 		case L2CAP_CONF_EWS:
3434 			if (olen != 2)
3435 				break;
	/* Extended window size requests are not accepted here */
3436 			return -ECONNREFUSED;
3437 
3438 		default:
3439 			if (hint)
3440 				break;
	/* Echo unknown non-hint options back to the remote */
3441 			result = L2CAP_CONF_UNKNOWN;
3442 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3443 			break;
3444 		}
3445 	}
3446 
3447 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3448 		goto done;
3449 
3450 	switch (chan->mode) {
3451 	case L2CAP_MODE_STREAMING:
3452 	case L2CAP_MODE_ERTM:
3453 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3454 			chan->mode = l2cap_select_mode(rfc.mode,
3455 						       chan->conn->feat_mask);
3456 			break;
3457 		}
3458 
3459 		if (remote_efs) {
3460 			if (__l2cap_efs_supported(chan->conn))
3461 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3462 			else
3463 				return -ECONNREFUSED;
3464 		}
3465 
3466 		if (chan->mode != rfc.mode)
3467 			return -ECONNREFUSED;
3468 
3469 		break;
3470 	}
3471 
3472 done:
3473 	if (chan->mode != rfc.mode) {
3474 		result = L2CAP_CONF_UNACCEPT;
3475 		rfc.mode = chan->mode;
3476 
	/* Mode mismatch after a full negotiation round: give up */
3477 		if (chan->num_conf_rsp == 1)
3478 			return -ECONNREFUSED;
3479 
3480 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3481 				   (unsigned long) &rfc, endptr - ptr);
3482 	}
3483 
3484 	if (result == L2CAP_CONF_SUCCESS) {
3485 		/* Configure output options and let the other side know
3486 		 * which ones we don't like. */
3487 
3488 		/* If MTU is not provided in configure request, try adjusting it
3489 		 * to the current output MTU if it has been set
3490 		 *
3491 		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
3492 		 *
3493 		 * Each configuration parameter value (if any is present) in an
3494 		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
3495 		 * configuration parameter value that has been sent (or, in case
3496 		 * of default values, implied) in the corresponding
3497 		 * L2CAP_CONFIGURATION_REQ packet.
3498 		 */
3499 		if (!mtu) {
3500 			/* Only adjust for ERTM channels as for older modes the
3501 			 * remote stack may not be able to detect that the
3502 			 * adjustment causing it to silently drop packets.
3503 			 */
3504 			if (chan->mode == L2CAP_MODE_ERTM &&
3505 			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
3506 				mtu = chan->omtu;
3507 			else
3508 				mtu = L2CAP_DEFAULT_MTU;
3509 		}
3510 
3511 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3512 			result = L2CAP_CONF_UNACCEPT;
3513 		else {
3514 			chan->omtu = mtu;
3515 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3516 		}
3517 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3518 
3519 		if (remote_efs) {
	/* Service types must match unless one side is no-traffic */
3520 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3521 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3522 			    efs.stype != chan->local_stype) {
3523 
3524 				result = L2CAP_CONF_UNACCEPT;
3525 
3526 				if (chan->num_conf_req >= 1)
3527 					return -ECONNREFUSED;
3528 
3529 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3530 						   sizeof(efs),
3531 						   (unsigned long) &efs, endptr - ptr);
3532 			} else {
3533 				/* Send PENDING Conf Rsp */
3534 				result = L2CAP_CONF_PENDING;
3535 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3536 			}
3537 		}
3538 
3539 		switch (rfc.mode) {
3540 		case L2CAP_MODE_BASIC:
3541 			chan->fcs = L2CAP_FCS_NONE;
3542 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3543 			break;
3544 
3545 		case L2CAP_MODE_ERTM:
3546 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3547 				chan->remote_tx_win = rfc.txwin_size;
3548 			else
3549 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3550 
3551 			chan->remote_max_tx = rfc.max_transmit;
3552 
	/* Clamp the remote's PDU size to what fits our link MTU */
3553 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3554 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3555 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3556 			rfc.max_pdu_size = cpu_to_le16(size);
3557 			chan->remote_mps = size;
3558 
3559 			__l2cap_set_ertm_timeouts(chan, &rfc);
3560 
3561 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3562 
3563 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3564 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3565 
3566 			if (remote_efs &&
3567 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3568 				chan->remote_id = efs.id;
3569 				chan->remote_stype = efs.stype;
3570 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3571 				chan->remote_flush_to =
3572 					le32_to_cpu(efs.flush_to);
3573 				chan->remote_acc_lat =
3574 					le32_to_cpu(efs.acc_lat);
3575 				chan->remote_sdu_itime =
3576 					le32_to_cpu(efs.sdu_itime);
3577 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3578 						   sizeof(efs),
3579 						   (unsigned long) &efs, endptr - ptr);
3580 			}
3581 			break;
3582 
3583 		case L2CAP_MODE_STREAMING:
3584 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3585 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3586 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3587 			rfc.max_pdu_size = cpu_to_le16(size);
3588 			chan->remote_mps = size;
3589 
3590 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3591 
3592 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3593 					   (unsigned long) &rfc, endptr - ptr);
3594 
3595 			break;
3596 
3597 		default:
3598 			result = L2CAP_CONF_UNACCEPT;
3599 
3600 			memset(&rfc, 0, sizeof(rfc));
3601 			rfc.mode = chan->mode;
3602 		}
3603 
3604 		if (result == L2CAP_CONF_SUCCESS)
3605 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3606 	}
3607 	rsp->scid   = cpu_to_le16(chan->dcid);
3608 	rsp->result = cpu_to_le16(result);
3609 	rsp->flags  = cpu_to_le16(0);
3610 
3611 	return ptr - data;
3612 }
3613 
/* Parse a configuration response ('rsp', 'len' bytes) and build the
 * follow-up L2CAP_CONF_REQ into 'data' (at most 'size' bytes) that
 * accepts the remote's adjustments.  '*result' carries the response's
 * result code in and may be updated (e.g. to UNACCEPT for a too-small
 * MTU).  On SUCCESS/PENDING the negotiated ERTM or streaming parameters
 * are committed to the channel.  Returns the new request's length, or
 * -ECONNREFUSED if the response is incompatible with our mode or EFS
 * service type.
 */
l2cap_parse_conf_rsp(struct l2cap_chan * chan,void * rsp,int len,void * data,size_t size,u16 * result)3614 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3615 				void *data, size_t size, u16 *result)
3616 {
3617 	struct l2cap_conf_req *req = data;
3618 	void *ptr = req->data;
3619 	void *endptr = data + size;
3620 	int type, olen;
3621 	unsigned long val;
3622 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3623 	struct l2cap_conf_efs efs;
3624 
3625 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3626 
	/* Walk every option in the response; malformed lengths are skipped */
3627 	while (len >= L2CAP_CONF_OPT_SIZE) {
3628 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3629 		if (len < 0)
3630 			break;
3631 
3632 		switch (type) {
3633 		case L2CAP_CONF_MTU:
3634 			if (olen != 2)
3635 				break;
	/* Never accept an MTU below the spec minimum */
3636 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3637 				*result = L2CAP_CONF_UNACCEPT;
3638 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3639 			} else
3640 				chan->imtu = val;
3641 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3642 					   endptr - ptr);
3643 			break;
3644 
3645 		case L2CAP_CONF_FLUSH_TO:
3646 			if (olen != 2)
3647 				break;
3648 			chan->flush_to = val;
3649 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3650 					   chan->flush_to, endptr - ptr);
3651 			break;
3652 
3653 		case L2CAP_CONF_RFC:
3654 			if (olen != sizeof(rfc))
3655 				break;
3656 			memcpy(&rfc, (void *)val, olen);
	/* A state-2 device cannot change mode mid-negotiation */
3657 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3658 			    rfc.mode != chan->mode)
3659 				return -ECONNREFUSED;
3660 			chan->fcs = 0;
3661 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3662 					   (unsigned long) &rfc, endptr - ptr);
3663 			break;
3664 
3665 		case L2CAP_CONF_EWS:
3666 			if (olen != 2)
3667 				break;
3668 			chan->ack_win = min_t(u16, val, chan->ack_win);
3669 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3670 					   chan->tx_win, endptr - ptr);
3671 			break;
3672 
3673 		case L2CAP_CONF_EFS:
3674 			if (olen != sizeof(efs))
3675 				break;
3676 			memcpy(&efs, (void *)val, olen);
3677 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3678 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3679 			    efs.stype != chan->local_stype)
3680 				return -ECONNREFUSED;
3681 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3682 					   (unsigned long) &efs, endptr - ptr);
3683 			break;
3684 
3685 		case L2CAP_CONF_FCS:
3686 			if (olen != 1)
3687 				break;
3688 			if (*result == L2CAP_CONF_PENDING)
3689 				if (val == L2CAP_FCS_NONE)
3690 					set_bit(CONF_RECV_NO_FCS,
3691 						&chan->conf_state);
3692 			break;
3693 		}
3694 	}
3695 
	/* Basic mode cannot be upgraded by the remote's response */
3696 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3697 		return -ECONNREFUSED;
3698 
3699 	chan->mode = rfc.mode;
3700 
3701 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3702 		switch (rfc.mode) {
3703 		case L2CAP_MODE_ERTM:
3704 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3705 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3706 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3707 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3708 				chan->ack_win = min_t(u16, chan->ack_win,
3709 						      rfc.txwin_size);
3710 
3711 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3712 				chan->local_msdu = le16_to_cpu(efs.msdu);
3713 				chan->local_sdu_itime =
3714 					le32_to_cpu(efs.sdu_itime);
3715 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3716 				chan->local_flush_to =
3717 					le32_to_cpu(efs.flush_to);
3718 			}
3719 			break;
3720 
3721 		case L2CAP_MODE_STREAMING:
3722 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3723 		}
3724 	}
3725 
3726 	req->dcid   = cpu_to_le16(chan->dcid);
3727 	req->flags  = cpu_to_le16(0);
3728 
3729 	return ptr - data;
3730 }
3731 
/* Build a bare L2CAP_CONF_RSP (header only, no options) with the given
 * result and flags.  Since 'ptr' is never advanced past rsp->data, the
 * returned length is exactly the response header size.
 */
l2cap_build_conf_rsp(struct l2cap_chan * chan,void * data,u16 result,u16 flags)3732 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3733 				u16 result, u16 flags)
3734 {
3735 	struct l2cap_conf_rsp *rsp = data;
3736 	void *ptr = rsp->data;
3737 
3738 	BT_DBG("chan %p", chan);
3739 
3740 	rsp->scid   = cpu_to_le16(chan->dcid);
3741 	rsp->result = cpu_to_le16(result);
3742 	rsp->flags  = cpu_to_le16(flags);
3743 
3744 	return ptr - data;
3745 }
3746 
/* Send the deferred LE credit-based connection response for 'chan',
 * reporting success with the channel's current MTU, MPS and rx credits.
 * Used when connection setup was deferred (presumably pending user-space
 * authorization — confirm at the defer callsite, outside this view).
 */
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3747 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3748 {
3749 	struct l2cap_le_conn_rsp rsp;
3750 	struct l2cap_conn *conn = chan->conn;
3751 
3752 	BT_DBG("chan %p", chan);
3753 
3754 	rsp.dcid    = cpu_to_le16(chan->scid);
3755 	rsp.mtu     = cpu_to_le16(chan->imtu);
3756 	rsp.mps     = cpu_to_le16(chan->mps);
3757 	rsp.credits = cpu_to_le16(chan->rx_credits);
3758 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3759 
3760 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3761 		       &rsp);
3762 }
3763 
/* Per-channel callback used to check whether all ECRED channels sharing
 * an ident are ready: counts channels still pending accept (BT_CONNECT2)
 * into *result, leaves connected channels alone, and records a refusal
 * (-ECONNREFUSED) for any channel in another state.  Once a refusal is
 * recorded, or for channels that sent the request themselves, nothing
 * more is tallied.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* If channel still pending accept add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
	}
}
3784 
/* Scratch buffer for building a deferred ECRED connection response:
 * the fixed response header plus room for up to L2CAP_ECRED_MAX_CID
 * destination CIDs, and a count of how many CIDs have been filled in.
 */
3785 struct l2cap_ecred_rsp_data {
3786 	struct {
3787 		struct l2cap_ecred_conn_rsp_hdr rsp;
3788 		__le16 scid[L2CAP_ECRED_MAX_CID];
3789 	} __packed pdu;
3790 	int count;
3791 };
3792 
/* Per-channel callback that folds one deferred ECRED channel into the
 * response being assembled in 'data': on success its source CID is
 * appended to the PDU's CID list, otherwise the channel is deleted.
 * container_of() re-views the fixed-size pdu.rsp header as the
 * flexible-array struct l2cap_ecred_conn_rsp so the scid[] storage can
 * be addressed through its dcid[] member.
 */
l2cap_ecred_rsp_defer(struct l2cap_chan * chan,void * data)3793 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3794 {
3795 	struct l2cap_ecred_rsp_data *rsp = data;
3796 	struct l2cap_ecred_conn_rsp *rsp_flex =
3797 		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
3798 
3799 	/* Check if channel for outgoing connection or if it wasn't deferred
3800 	 * since in those cases it must be skipped.
3801 	 */
3802 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
3803 	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
3804 		return;
3805 
3806 	/* Reset ident so only one response is sent */
3807 	chan->ident = 0;
3808 
3809 	/* Include all channels pending with the same ident */
3810 	if (!rsp->pdu.rsp.result)
3811 		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
3812 	else
3813 		l2cap_chan_del(chan, ECONNRESET);
3814 }
3815 
/* Send the deferred ECRED (enhanced credit based) connection response
 * covering every channel that shares this channel's ident.  Bails out
 * while any sibling channel is still pending accept; if any sibling was
 * refused, the whole response carries L2CAP_CR_LE_AUTHORIZATION.
 */
__l2cap_ecred_conn_rsp_defer(struct l2cap_chan * chan)3816 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3817 {
3818 	struct l2cap_conn *conn = chan->conn;
3819 	struct l2cap_ecred_rsp_data data;
3820 	u16 id = chan->ident;
3821 	int result = 0;
3822 
	/* ident == 0 means no response is outstanding */
3823 	if (!id)
3824 		return;
3825 
3826 	BT_DBG("chan %p id %d", chan, id);
3827 
3828 	memset(&data, 0, sizeof(data));
3829 
3830 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3831 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3832 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3833 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3834 
3835 	/* Verify that all channels are ready */
3836 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
3837 
	/* result > 0: at least one sibling still pending accept */
3838 	if (result > 0)
3839 		return;
3840 
3841 	if (result < 0)
3842 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
3843 
3844 	/* Build response */
3845 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
3846 
3847 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
3848 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
3849 		       &data.pdu);
3850 }
3851 
/* Send the deferred BR/EDR connection response (success) for 'chan',
 * then kick off configuration by sending the first L2CAP_CONF_REQ —
 * unless one was already sent (CONF_REQ_SENT guards against duplicates).
 */
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3852 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3853 {
3854 	struct l2cap_conn_rsp rsp;
3855 	struct l2cap_conn *conn = chan->conn;
3856 	u8 buf[128];
3857 	u8 rsp_code;
3858 
3859 	rsp.scid   = cpu_to_le16(chan->dcid);
3860 	rsp.dcid   = cpu_to_le16(chan->scid);
3861 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3862 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3863 	rsp_code = L2CAP_CONN_RSP;
3864 
3865 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3866 
3867 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3868 
	/* Only the first caller proceeds to send the config request */
3869 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3870 		return;
3871 
3872 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3873 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3874 	chan->num_conf_req++;
3875 }
3876 
/* Extract the RFC (and optional EWS) parameters from a configuration
 * response buffer and commit them to the channel.  Only meaningful for
 * ERTM/streaming channels; defaults pre-loaded into 'rfc'/'txwin_ext'
 * are used when the remote omitted the options.
 */
l2cap_conf_rfc_get(struct l2cap_chan * chan,void * rsp,int len)3877 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3878 {
3879 	int type, olen;
3880 	unsigned long val;
3881 	/* Use sane default values in case a misbehaving remote device
3882 	 * did not send an RFC or extended window size option.
3883 	 */
3884 	u16 txwin_ext = chan->ack_win;
3885 	struct l2cap_conf_rfc rfc = {
3886 		.mode = chan->mode,
3887 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3888 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3889 		.max_pdu_size = cpu_to_le16(chan->imtu),
3890 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3891 	};
3892 
3893 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3894 
3895 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3896 		return;
3897 
	/* Scan the options; malformed lengths are ignored */
3898 	while (len >= L2CAP_CONF_OPT_SIZE) {
3899 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3900 		if (len < 0)
3901 			break;
3902 
3903 		switch (type) {
3904 		case L2CAP_CONF_RFC:
3905 			if (olen != sizeof(rfc))
3906 				break;
3907 			memcpy(&rfc, (void *)val, olen);
3908 			break;
3909 		case L2CAP_CONF_EWS:
3910 			if (olen != 2)
3911 				break;
3912 			txwin_ext = val;
3913 			break;
3914 		}
3915 	}
3916 
3917 	switch (rfc.mode) {
3918 	case L2CAP_MODE_ERTM:
3919 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3920 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3921 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	/* With extended control the window comes from EWS, not RFC */
3922 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3923 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3924 		else
3925 			chan->ack_win = min_t(u16, chan->ack_win,
3926 					      rfc.txwin_size);
3927 		break;
3928 	case L2CAP_MODE_STREAMING:
3929 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3930 	}
3931 }
3932 
/* Handle an incoming L2CAP_CMD_REJ.  If it rejects our outstanding
 * information request (matching ident), treat the feature-mask exchange
 * as finished and start any channels that were waiting on it.
 * Returns 0, or -EPROTO when the PDU is too short.
 */
l2cap_command_rej(struct l2cap_conn * conn,struct l2cap_cmd_hdr * cmd,u16 cmd_len,u8 * data)3933 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3934 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3935 				    u8 *data)
3936 {
3937 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3938 
3939 	if (cmd_len < sizeof(*rej))
3940 		return -EPROTO;
3941 
3942 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3943 		return 0;
3944 
3945 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3946 	    cmd->ident == conn->info_ident) {
3947 		cancel_delayed_work(&conn->info_timer);
3948 
3949 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3950 		conn->info_ident = 0;
3951 
3952 		l2cap_conn_start(conn);
3953 	}
3954 
3955 	return 0;
3956 }
3957 
/* Handle an incoming BR/EDR connection request: find a listening
 * channel for the PSM, enforce link security (except for SDP), validate
 * the remote's source CID, create and register the new channel, and
 * reply with 'rsp_code' carrying success, pending, or a specific error.
 * On success (or once security completes) configuration is started; if
 * the feature-mask exchange has not run yet, an info request is sent
 * first and the connection is left pending.
 */
l2cap_connect(struct l2cap_conn * conn,struct l2cap_cmd_hdr * cmd,u8 * data,u8 rsp_code)3958 static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3959 			  u8 *data, u8 rsp_code)
3960 {
3961 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3962 	struct l2cap_conn_rsp rsp;
3963 	struct l2cap_chan *chan = NULL, *pchan = NULL;
3964 	int result, status = L2CAP_CS_NO_INFO;
3965 
3966 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3967 	__le16 psm = req->psm;
3968 
3969 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3970 
3971 	/* Check if we have socket listening on psm */
3972 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3973 					 &conn->hcon->dst, ACL_LINK);
3974 	if (!pchan) {
3975 		result = L2CAP_CR_BAD_PSM;
3976 		goto response;
3977 	}
3978 
3979 	l2cap_chan_lock(pchan);
3980 
3981 	/* Check if the ACL is secure enough (if not SDP) */
3982 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3983 	    (!hci_conn_check_link_mode(conn->hcon) ||
3984 	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
3985 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3986 		result = L2CAP_CR_SEC_BLOCK;
3987 		goto response;
3988 	}
3989 
3990 	result = L2CAP_CR_NO_MEM;
3991 
3992 	/* Check for valid dynamic CID range (as per Erratum 3253) */
3993 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3994 		result = L2CAP_CR_INVALID_SCID;
3995 		goto response;
3996 	}
3997 
3998 	/* Check if we already have channel with that dcid */
3999 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4000 		result = L2CAP_CR_SCID_IN_USE;
4001 		goto response;
4002 	}
4003 
4004 	chan = pchan->ops->new_connection(pchan);
4005 	if (!chan)
4006 		goto response;
4007 
4008 	/* For certain devices (ex: HID mouse), support for authentication,
4009 	 * pairing and bonding is optional. For such devices, inorder to avoid
4010 	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4011 	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4012 	 */
4013 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4014 
4015 	bacpy(&chan->src, &conn->hcon->src);
4016 	bacpy(&chan->dst, &conn->hcon->dst);
4017 	chan->src_type = bdaddr_src_type(conn->hcon);
4018 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4019 	chan->psm  = psm;
4020 	chan->dcid = scid;
4021 
4022 	__l2cap_chan_add(conn, chan);
4023 
4024 	dcid = chan->scid;
4025 
4026 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4027 
4028 	chan->ident = cmd->ident;
4029 
	/* Decide the response: success only once the info exchange is
	 * done and security checks pass without deferral
	 */
4030 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4031 		if (l2cap_chan_check_security(chan, false)) {
4032 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4033 				l2cap_state_change(chan, BT_CONNECT2);
4034 				result = L2CAP_CR_PEND;
4035 				status = L2CAP_CS_AUTHOR_PEND;
4036 				chan->ops->defer(chan);
4037 			} else {
4038 				l2cap_state_change(chan, BT_CONFIG);
4039 				result = L2CAP_CR_SUCCESS;
4040 				status = L2CAP_CS_NO_INFO;
4041 			}
4042 		} else {
4043 			l2cap_state_change(chan, BT_CONNECT2);
4044 			result = L2CAP_CR_PEND;
4045 			status = L2CAP_CS_AUTHEN_PEND;
4046 		}
4047 	} else {
4048 		l2cap_state_change(chan, BT_CONNECT2);
4049 		result = L2CAP_CR_PEND;
4050 		status = L2CAP_CS_NO_INFO;
4051 	}
4052 
4053 response:
4054 	rsp.scid   = cpu_to_le16(scid);
4055 	rsp.dcid   = cpu_to_le16(dcid);
4056 	rsp.result = cpu_to_le16(result);
4057 	rsp.status = cpu_to_le16(status);
4058 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4059 
	/* No listener found: nothing locked, nothing more to do */
4060 	if (!pchan)
4061 		return;
4062 
	/* Pending with no info yet: start the feature-mask exchange */
4063 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4064 		struct l2cap_info_req info;
4065 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4066 
4067 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4068 		conn->info_ident = l2cap_get_ident(conn);
4069 
4070 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4071 
4072 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4073 			       sizeof(info), &info);
4074 	}
4075 
4076 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4077 	    result == L2CAP_CR_SUCCESS) {
4078 		u8 buf[128];
4079 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4080 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4081 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4082 		chan->num_conf_req++;
4083 	}
4084 
4085 	l2cap_chan_unlock(pchan);
4086 	l2cap_chan_put(pchan);
4087 }
4088 
/* Handle an incoming L2CAP_CONN_REQ signalling PDU: after a minimal
 * length check, run the common connect handler replying with
 * L2CAP_CONN_RSP.  Returns 0, or -EPROTO for a truncated PDU.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len >= sizeof(struct l2cap_conn_req)) {
		l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
		return 0;
	}

	return -EPROTO;
}
4098 
/* Handle an incoming BR/EDR L2CAP_CONN_RSP signalling command.
 *
 * Locates the local channel either by the source CID echoed in the
 * response or, when scid is 0, by the signalling identifier of the
 * original request, then advances the channel state machine according
 * to the peer's result code.
 *
 * Returns 0 on success, -EPROTO on a malformed PDU, -EBADSLT when no
 * matching channel exists or the peer's dcid collides with one already
 * in use.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dcid from the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* scid == 0 can happen for a pending response; fall back to
	 * matching the channel by the request's signalling ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference only if the channel is not already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial configuration request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal; tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4176 
set_default_fcs(struct l2cap_chan * chan)4177 static inline void set_default_fcs(struct l2cap_chan *chan)
4178 {
4179 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4180 	 * sides request it.
4181 	 */
4182 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4183 		chan->fcs = L2CAP_FCS_NONE;
4184 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4185 		chan->fcs = L2CAP_FCS_CRC16;
4186 }
4187 
/* Send a successful L2CAP_CONF_RSP for a channel whose local
 * configuration was held pending (EFS negotiation).  Clears the
 * local-pending flag, marks our side of configuration as done and
 * transmits the response built into the caller-supplied buffer.
 *
 * @data:  scratch buffer the response is built into
 * @ident: signalling identifier of the config request being answered
 * @flags: continuation flags to echo back in the response
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4203 
/* Send an L2CAP_COMMAND_REJ with reason "invalid CID" in response to a
 * signalling command that referenced a channel we do not have.  The
 * scid/dcid pair is echoed back so the peer can identify which request
 * was rejected.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	/* Use cpu_to_le16() like the rest of this file; the
	 * double-underscore variants are the internal forms.
	 */
	rej.scid = cpu_to_le16(scid);
	rej.dcid = cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4215 
/* Handle an incoming BR/EDR L2CAP_CONF_REQ signalling command.
 *
 * Accumulates (possibly fragmented) configuration options into the
 * channel's conf_req buffer, and once the final fragment arrives,
 * parses them, sends a config response and advances the configuration
 * state machine.  When both directions are configured the channel is
 * made ready (with ERTM initialisation where applicable).
 *
 * Returns 0 or a negative error propagated from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both sides configured: finalise FCS/ERTM and mark ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We answered the peer but have not sent our own config yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4324 
/* Handle an incoming BR/EDR L2CAP_CONF_RSP signalling command.
 *
 * Processes the peer's verdict on our configuration request: success,
 * pending (EFS), unknown/unacceptable options (triggers a renegotiated
 * request, bounded by L2CAP_CONF_MAX_CONF_RSP), or failure (disconnect).
 * When both directions finish configuring, the channel is made ready.
 *
 * Returns 0 or a negative error propagated from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options until the renegotiation
		 * budget is exhausted, then fall through to disconnect.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More fragments of this response are still to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalise FCS/ERTM and mark ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4431 
/* Handle an incoming L2CAP_DISCONN_REQ signalling command.
 *
 * Looks up the channel addressed by the peer's dcid, acknowledges with
 * an L2CAP_DISCONN_RSP (CIDs mirrored back), then shuts down and
 * deletes the channel.
 *
 * Returns 0 on success (including unknown CID, which is answered with
 * a command reject instead), -EPROTO on a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; returns the channel locked + held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Response carries the CIDs from the channel's perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4470 
/* Handle an incoming L2CAP_DISCONN_RSP signalling command.
 *
 * Completes a disconnect we initiated: the channel must be in
 * BT_DISCONN state, otherwise the response is ignored.  On success
 * the channel is deleted and closed.
 *
 * Returns 0 (unknown CIDs and state mismatches are silently ignored),
 * -EPROTO on a malformed PDU.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked and with a reference held.
	 * Kernel style: no braces around a single-statement branch.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only act on a response to a disconnect we actually sent */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4507 
/* Handle an incoming L2CAP_INFO_REQ signalling command.
 *
 * Answers feature-mask and fixed-channel queries from our local
 * capabilities; any other information type gets a NOTSUPP response.
 *
 * Returns 0 on success, -EPROTO on a malformed PDU.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus a 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only if ERTM is enabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus an 8-byte fixed-channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4555 
/* Handle an incoming L2CAP_INFO_RSP signalling command.
 *
 * Records the peer's advertised feature mask / fixed-channel bitmap.
 * After the feature mask arrives, a follow-up fixed-channel query is
 * issued if the peer supports fixed channels; once the exchange is
 * complete (or fails), pending channel connects are started via
 * l2cap_conn_start().
 *
 * Returns 0 (stale or unexpected responses are ignored), -EPROTO on a
 * malformed PDU.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before starting */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4618 
/* Handle an incoming LE L2CAP_CONN_PARAM_UPDATE_REQ signalling command.
 *
 * Only valid when we are the central (master): validates the requested
 * connection parameters, always answers with accept/reject, and on
 * accept initiates the LE connection update and notifies the
 * management interface of the new parameters.
 *
 * Returns 0 on success, -EINVAL when we are not master, -EPROTO on a
 * malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central may grant a parameter update */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
4668 
/* Handle an incoming LE L2CAP_LE_CONN_RSP signalling command.
 *
 * Matches the response to the pending channel by signalling identifier
 * and completes the LE credit-based connection: on success the remote
 * parameters (dcid/mtu/mps/credits) are recorded and the channel is
 * made ready; on an authentication/encryption failure the security
 * level is raised and SMP is invoked so a new connect can be retried;
 * any other result tears the channel down.
 *
 * Returns 0 on success, -EPROTO on a malformed PDU, -EBADSLT when no
 * matching channel exists or the dcid collides with one already in use.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response needs spec-minimum MTU/MPS (23) and a
	 * dcid within the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate security one level and retry via SMP */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4748 
/* Dispatch a single BR/EDR signalling command to its handler.
 *
 * Echo requests are answered inline by mirroring the payload; echo
 * responses are silently consumed.  Unknown opcodes return -EINVAL so
 * the caller can generate a command reject.  Note that only some
 * handlers propagate their error here — response handlers are invoked
 * for their side effects and their return value is intentionally
 * dropped.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4807 
/* Handle an incoming LE L2CAP_LE_CONN_REQ (credit-based connection)
 * signalling command.
 *
 * Validates the PSM, security level and source CID, then either
 * creates a new child channel from the matching listening channel
 * (answering with L2CAP_LE_CONN_RSP) or responds with the appropriate
 * error result.  With FLAG_DEFER_SETUP, the response is deferred to
 * userspace and no reply is sent here.
 *
 * Returns 0 on success (protocol errors are reported in the response),
 * -EPROTO on a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE credit-based channels is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our locally-allocated scid and initial rx credits go into
	 * the response.
	 */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4945 
/* Handle an incoming LE L2CAP_LE_CREDITS signalling command.
 *
 * Adds the peer-granted credits to the channel's transmit budget,
 * guarding against overflow past LE_FLOWCTL_MAX_CREDITS (an overflow
 * is a protocol violation and triggers a disconnect), then resumes any
 * transmission stalled on credits.
 *
 * Returns 0 on success (including the overflow case, to avoid an
 * unnecessary command reject), -EPROTO on a malformed PDU, -EBADSLT
 * for an unknown CID.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4992 
/* Handle an incoming LE L2CAP_ECRED_CONN_REQ (enhanced credit-based
 * connection) signalling command.
 *
 * An enhanced request may open up to L2CAP_ECRED_MAX_CID channels at
 * once, sharing a single PSM/MTU/MPS.  Each requested scid is validated
 * and answered individually in the dcid array of the response; per-CID
 * failures set the overall result but do not abort the remaining CIDs.
 * With FLAG_DEFER_SETUP the response is deferred to userspace.
 *
 * Returns 0 on success (protocol errors are reported in the response),
 * -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload after the fixed header must be a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Clear the response PDU before filling in per-CID results */
	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* dcid 0 in the response marks a failed allocation */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: userspace will trigger the response later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5143 
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_RSP (ECRED).
 *
 * Walks every channel on the connection that is still waiting on the
 * request ident and consumes one DCID from the response per pending
 * channel.  Depending on the result code the channel is either torn
 * down, retried at a higher security level, or completed via
 * l2cap_chan_ready().  Uses the _safe list iterator because
 * l2cap_chan_del() unlinks channels while we walk the list.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	/* Must at least carry the fixed part (mtu/mps/credits/result) */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* From here on cmd_len tracks the remaining DCID list bytes */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request ident */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Bump security and retry the connection */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Success: record remote parameters and go ready */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5253 
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_REQ.
 *
 * The request carries new MTU/MPS values followed by a list of 16-bit
 * CIDs (the peer's source CIDs, i.e. our destination CIDs) that the
 * reconfiguration applies to.  An MTU decrease on any included channel
 * is an error per the spec.  Always answers with a
 * L2CAP_CREDIT_BASED_RECONFIGURE_RSP carrying the result, except for a
 * zero CID which is treated as a protocol error (-EPROTO, no response).
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Need the fixed header plus a whole number of 16-bit CIDs.
	 * The subtraction must be parenthesized: '%' binds tighter than
	 * '-', so without parentheses the modulo applies to sizeof(*req)
	 * alone and the trailing-bytes check is broken.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* The peer's scid is our dcid */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5324 
/* Handle an incoming L2CAP_CREDIT_BASED_RECONFIGURE_RSP.
 *
 * The PDU carries only a 2-octet Result field, so it must be parsed
 * with struct l2cap_ecred_reconf_rsp.  (Parsing it with the larger
 * struct l2cap_ecred_conn_rsp made the minimum-length check require 8
 * octets and read Result from the wrong offset, so spec-sized responses
 * were rejected with -EPROTO.)  On failure, every channel that was part
 * of the pending reconfigure request (matched by command ident) is torn
 * down.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the converted value, not the raw little-endian field */
	BT_DBG("result 0x%4.4x", result);

	/* Success: nothing to do, parameters were updated on request */
	if (!result)
		return 0;

	/* Failure: disconnect all channels included in the request.
	 * _safe iteration because l2cap_chan_del() unlinks entries.
	 */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5352 
/* Handle an L2CAP_COMMAND_REJ received on the LE signaling channel.
 *
 * If a channel is still waiting on the rejected request ident, the peer
 * evidently did not understand our request, so the channel is torn down
 * with ECONNREFUSED.  Always reports success once the PDU is long
 * enough to parse.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	/* The reject PDU must at least carry the reason field */
	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Find the channel blocked on the rejected request, if any */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return 0;

	/* Pin the channel unless it is already on its way out */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return 0;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5379 
/* Dispatch a single LE signaling command to its handler.
 *
 * Returns the handler's status for commands whose failure should be
 * answered with a command reject by the caller; handlers whose result
 * is deliberately ignored return 0 unconditionally.  An unrecognized
 * opcode yields -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Result intentionally ignored */
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do with the response */
		return 0;

	case L2CAP_LE_CONN_RSP:
		/* Result intentionally ignored */
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		return 0;

	case L2CAP_LE_CONN_REQ:
		return l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		return l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_REQ:
		return l2cap_ecred_conn_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_CONN_RSP:
		return l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_REQ:
		return l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);

	case L2CAP_ECRED_RECONF_RSP:
		return l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		return l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		/* Result intentionally ignored */
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
5442 
/* Process one PDU received on the LE signaling channel (CID 0x0005).
 *
 * Unlike BR/EDR, an LE signaling packet carries exactly one command, so
 * the declared length must match the remaining payload exactly.  On a
 * handler error a command reject is sent back.  The skb is always
 * consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling CID is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	/* cmd points into the skb head; skb_pull only advances skb->data,
	 * so the header bytes remain readable through cmd below.
	 */
	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE signaling PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err here is a
		 * handler failure, not a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5483 
/* Send a generic "command not understood" reject for the given ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5491 
/* Process PDUs received on the BR/EDR signaling channel (CID 0x0001).
 *
 * A BR/EDR signaling packet may carry multiple commands back to back,
 * so this loops over the payload, validating each command header and
 * dispatching it.  Corrupted or failing commands are answered with a
 * command reject; the skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to any raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		/* cmd points into the skb head; skb_pull only advances
		 * skb->data, so the header stays readable through cmd.
		 */
		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length must fit the remainder; ident 0 is
		 * reserved.  Skip the bad command but keep parsing.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			/* Advance past the claimed payload, clamped to what
			 * is actually present.
			 */
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes smaller than a command header are garbage */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5539 
/* Verify and strip the CRC16 FCS trailer of an ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header (which sits just before skb->data,
 * hence the negative offset) plus the payload.  Returns 0 when the FCS
 * matches or FCS is disabled, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields imply the larger header layout */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the two FCS bytes are still
		 * physically present just past the new skb->len, which is
		 * where they are read from below.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* CRC spans the header preceding skb->data plus payload */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5560 
/* Answer a poll (P-bit) by sending a frame carrying the final (F) bit.
 *
 * Preference order: an RNR if we are locally busy, otherwise pending
 * I-frames (which carry the F-bit implicitly via CONN_SEND_FBIT), and
 * as a last resort an explicit RR S-frame.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Mark that an F-bit is owed; cleared by whichever frame sends it */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just un-busied: restart retransmission monitoring if
	 * frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5594 
append_skb_frag(struct sk_buff * skb,struct sk_buff * new_frag,struct sk_buff ** last_frag)5595 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5596 			    struct sk_buff **last_frag)
5597 {
5598 	/* skb->len reflects data in skb as well as all fragments
5599 	 * skb->data_len reflects only data in fragments
5600 	 */
5601 	if (!skb_has_frag_list(skb))
5602 		skb_shinfo(skb)->frag_list = new_frag;
5603 
5604 	new_frag->next = NULL;
5605 
5606 	(*last_frag)->next = new_frag;
5607 	*last_frag = new_frag;
5608 
5609 	skb->len += new_frag->len;
5610 	skb->data_len += new_frag->len;
5611 	skb->truesize += new_frag->truesize;
5612 }
5613 
/* Reassemble an SDU from segmented I-frames according to the SAR bits.
 *
 * Ownership: on success the skb is either delivered to chan->ops->recv
 * or absorbed into the in-progress chan->sdu (skb is set to NULL in the
 * latter cases so the error path below does not double-free it).  On
 * any error both the skb and any partial SDU are freed.
 *
 * Returns 0 on success, a negative errno on protocol/size violations or
 * a recv callback failure.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		/* Start segment carries the total SDU length up front */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First segment alone must not already satisfy the SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a started SDU is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Still an error if we have met/exceeded the SDU length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a started SDU is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the current frame (if still owned here) and any
		 * partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5698 
/* Re-segment queued outgoing data after an MTU change (e.g. following a
 * channel move).  Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5704 
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5705 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5706 {
5707 	u8 event;
5708 
5709 	if (chan->mode != L2CAP_MODE_ERTM)
5710 		return;
5711 
5712 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5713 	l2cap_tx(chan, NULL, NULL, event);
5714 }
5715 
/* Drain the SREJ hold queue of frames that are now in sequence.
 *
 * Repeatedly pulls the frame matching buffer_seq out of srej_q and
 * feeds it to reassembly, stopping at the first gap, on local busy, or
 * on a reassembly error.  Once the queue is fully drained, reception
 * returns to the normal RECV state and an ack is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: wait for the missing frame to arrive */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All retransmissions received: back to normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5749 
/* Handle a received SREJ (selective reject) S-frame.
 *
 * The peer is requesting retransmission of the single frame reqseq.
 * Validates that the sequence number is sane and still available, then
 * retransmits it, tracking SREJ state (CONN_SREJ_ACT) so a later F-bit
 * response does not trigger a duplicate retransmission.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P-bit: retransmit and owe an F-bit back */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers the SREJ
			 * we already acted on for the same sequence number.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5807 
/* Handle a received REJ (reject) S-frame.
 *
 * The peer rejects everything from reqseq onward, so all unacked frames
 * starting there must be retransmitted.  CONN_REJ_ACT guards against
 * retransmitting twice when the F-bit response to our poll arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retry limit (max_tx == 0 means unlimited) */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit doesn't answer a REJ we
		 * already handled.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5844 
/* Classify the txseq of a received I-frame relative to the RX window.
 *
 * All comparisons are done as modular offsets from last_acked_seq so
 * sequence-number wraparound is handled uniformly.  The SREJ_SENT state
 * adds extra classes for frames that answer (or duplicate) outstanding
 * selective-reject requests.  Returns one of the L2CAP_TXSEQ_* codes
 * that drive the RX state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we want next */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested, but arriving out of the order we asked for */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* Earlier than expected within the window: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5930 
/* ERTM receive state machine: normal RECV state.
 *
 * Handles incoming I-frames (in-sequence delivery, gap detection and
 * transition to SREJ_SENT) and supervisory frames (RR/RNR/REJ/SREJ).
 *
 * skb ownership: skb_in_use is set whenever the skb has been handed off
 * (to reassembly or the SREJ hold queue); otherwise the skb is freed at
 * the bottom.  Returns 0 or a negative errno from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Piggybacked reqseq acks our outgoing frames */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Drop payload but still process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F-bit not answering a handled REJ: retransmit */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: stop retransmissions until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6082 
/* ERTM receive state machine: SREJ_SENT state.
 *
 * Entered after detecting a sequence gap and requesting selective
 * retransmissions.  Incoming I-frames are parked on the srej_q hold
 * queue; frames answering an outstanding SREJ trigger draining of the
 * queue via l2cap_rx_queued_iframes(), which returns to RECV when the
 * queue is empty.
 *
 * skb ownership mirrors l2cap_rx_state_recv(): skb_in_use marks a
 * handed-off skb, anything else is freed at the bottom.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first: take it off
			 * the pending-SREJ list and try to drain the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F-bit not answering a handled REJ: retransmit */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* No poll to answer: just acknowledge with an RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6225 
/* Complete an ERTM channel move: return the rx state machine to the
 * normal RECV state, adopt the (possibly changed) MTU of the underlying
 * HCI connection, and re-segment any queued outgoing data for the new
 * link.  Returns the result of l2cap_resegment().
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	/* The controller link may have a different MTU after the move */
	chan->conn->mtu = chan->conn->hcon->mtu;

	return l2cap_resegment(chan);
}
6235 
/* ERTM rx handler for L2CAP_RX_STATE_WAIT_P: we are waiting for a frame
 * with the P (poll) bit set before resuming normal operation after a
 * channel move.
 *
 * Returns 0 on success or a negative errno; -EPROTO when the frame does
 * not carry the poll bit or is an I-frame.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a frame with the P bit set is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame is not a valid response to the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Feed the S-frame through the normal RECV state handler */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6273 
/* ERTM rx handler for L2CAP_RX_STATE_WAIT_F: we are waiting for a frame
 * with the F (final) bit set before re-entering the RECV state after a
 * channel move.
 *
 * Returns 0 on success or a negative errno; -EPROTO when the frame does
 * not carry the final bit.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a frame with the F bit set is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	/* The controller link may have a different MTU after the move */
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6307 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6308 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6309 {
6310 	/* Make sure reqseq is for a packet that has been sent but not acked */
6311 	u16 unacked;
6312 
6313 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6314 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6315 }
6316 
/* Top-level ERTM receive dispatcher: validate the frame's reqseq and
 * route it to the handler for the channel's current rx state.  An
 * out-of-window reqseq is a protocol violation and triggers a
 * disconnect request.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6353 
/* Streaming-mode receive path: expected frames are reassembled and
 * delivered, anything else discards both the frame and any partially
 * reassembled SDU (streaming mode has no retransmission).  Always
 * returns 0; the skb is consumed on every path.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: throw away any partial SDU and the skb */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Track the received sequence even when the frame was dropped */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6405 
/* Process one ERTM or streaming mode PDU on a connected channel.
 * Performs FCS validation, MPS length checking and F/P-bit sanity
 * checks, then feeds the frame into the I-frame or S-frame rx engine.
 * The skb is always consumed (directly or by the rx handlers) and the
 * function always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field of a start fragment is not payload */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to reject the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6498 
/* Return receive credits to the remote on an LE/ECRED channel.
 * Computes the target credit count from the locally available rx
 * buffer space (l2cap_le_rx_credits) and, when the remote currently
 * holds fewer credits than the target, sends the difference in an
 * L2CAP_LE_CREDITS command.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Nothing to do if the remote already holds enough credits */
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6521 
/* Inform L2CAP how much receive buffer space the channel's owner
 * currently has available.  A change on a connected channel may
 * trigger returning credits to the remote.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (chan->rx_avail == rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6534 
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner and
 * replenish the sender's credits afterwards.  A recv failure only
 * tears down the channel when byte-based flow control is in use
 * (rx_avail != -1); otherwise credits are returned regardless.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6555 
/* Receive one LE/ECRED PDU: consume a credit, then either start a new
 * SDU (the first PDU carries a 2-byte SDU length header), complete a
 * single-PDU SDU, or append a continuation fragment to chan->sdu.
 * On reassembly errors the partial state is discarded but 0 is still
 * returned, since the skb has been freed here and a negative return
 * would make the caller free it again.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Receiving with no credits outstanding is a protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with the SDU length field */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in a single PDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	/* SDU complete: hand it off and reset reassembly state */
	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6660 
/* Dispatch an incoming data frame to the channel identified by @cid on
 * this connection.  The lock and reference taken by
 * l2cap_get_chan_by_scid() are released on the done path; the skb is
 * consumed on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv took ownership of the skb on success */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6726 
/* Deliver a connectionless packet to a channel listening on @psm.
 * Only ACL links carry connectionless data; the skb is consumed on
 * every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv took ownership of the skb on success */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6767 
/* Entry point for a complete L2CAP frame arriving from the HCI layer.
 * Frames that arrive before the link is fully connected are queued on
 * conn->pending_rx and replayed later by process_pending_rx().
 * Otherwise the frame is routed by CID to the signaling, connectionless
 * or data channel handlers.  The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Drop frames whose header length disagrees with the payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6822 
/* Work item that replays frames queued by l2cap_recv_frame() while the
 * HCI link was still being established.  Runs with conn->lock held so
 * the replay is serialized against the rest of the connection state.
 */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	mutex_lock(&conn->lock);

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);

	mutex_unlock(&conn->lock);
}
6838 
/* Find or create the L2CAP connection object for an HCI connection.
 * Returns the existing conn if one is already attached to @hcon,
 * otherwise allocates and initializes a new one (taking a reference on
 * the hci_conn and creating an hci_chan for it).  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up - nothing to do */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when the controller supports it */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6890 
is_valid_psm(u16 psm,u8 dst_type)6891 static bool is_valid_psm(u16 psm, u8 dst_type)
6892 {
6893 	if (!psm)
6894 		return false;
6895 
6896 	if (bdaddr_type_is_le(dst_type))
6897 		return (psm <= 0x00ff);
6898 
6899 	/* PSM must be odd and lsb of upper byte must be 0 */
6900 	return ((psm & 0x0101) == 0x0001);
6901 }
6902 
/* Context passed to the l2cap_chan_by_pid() iterator when counting
 * deferred ECRED channels that belong to the same process.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* owning process to match against */
	int count;			/* number of matching channels found */
};
6908 
/* l2cap_chan_list() iterator: count channels (other than d->chan) with
 * the same owning PID and PSM that are still connecting an ECRED
 * channel with deferred setup.  Used by l2cap_chan_connect() to
 * enforce L2CAP_ECRED_CONN_SCID_MAX.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	/* Skip the channel that initiated the count */
	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
6929 
/* Initiate an outgoing L2CAP channel to @dst.
 *
 * @chan:     the channel to connect (must be in a connectable state)
 * @psm:      destination PSM (little endian), or 0 for fixed channels
 * @cid:      destination CID for fixed channels, or 0 for PSM-based
 * @dst:      remote device address
 * @dst_type: L2CAP address type of @dst (BDADDR_* constants)
 * @timeout:  connection timeout handed down to the HCI connect calls
 *
 * Creates or reuses the underlying ACL/LE link, attaches the channel
 * to the connection and kicks off the L2CAP connect procedure.
 * Returns 0 on success (including "already connecting") or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from the PSM validity check */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly as slave; otherwise
		 * go through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed channel's destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link already up: start the L2CAP-level procedure immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7114 
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7115 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7116 {
7117 	struct l2cap_conn *conn = chan->conn;
7118 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7119 
7120 	pdu->mtu = cpu_to_le16(chan->imtu);
7121 	pdu->mps = cpu_to_le16(chan->mps);
7122 	pdu->scid[0] = cpu_to_le16(chan->scid);
7123 
7124 	chan->ident = l2cap_get_ident(conn);
7125 
7126 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7127 		       sizeof(pdu), &pdu);
7128 }
7129 
/* Raise the incoming MTU of an ECRED channel to @mtu and notify the
 * remote via a reconfigure request.  Shrinking the MTU is not allowed
 * and returns -EINVAL.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7143 
7144 /* ---- L2CAP interface with lower layer (HCI) ---- */
7145 
/* HCI callback for an incoming ACL connection request: scan the global
 * channel list for listening channels on this adapter and report the
 * accepted link modes.  Channels bound to the adapter's exact address
 * take precedence over BDADDR_ANY wildcard listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7174 
7175 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7176  * from an existing channel in the list or from the beginning of the
7177  * global list (by passing NULL as first parameter).
7178  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the given channel, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the local address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference before dropping the list lock; the
		 * caller is responsible for putting it.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7210 
/* HCI callback invoked when an ACL or LE connection attempt completes.
 * On failure the L2CAP connection state is torn down; on success a
 * conn object is (re)attached, listening fixed channels are offered
 * the new link, and pending channels are started.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7271 
l2cap_disconn_ind(struct hci_conn * hcon)7272 int l2cap_disconn_ind(struct hci_conn *hcon)
7273 {
7274 	struct l2cap_conn *conn = hcon->l2cap_data;
7275 
7276 	BT_DBG("hcon %p", hcon);
7277 
7278 	if (!conn)
7279 		return HCI_ERROR_REMOTE_USER_TERM;
7280 	return conn->disc_reason;
7281 }
7282 
/* HCI callback: the ACL/LE link has been disconnected - tear down all
 * L2CAP state associated with it.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7292 
/* React to an encryption change on a connection-oriented channel:
 * medium security arms a timer while encryption is off and clears it
 * once encryption is back; high/FIPS security closes the channel
 * outright when encryption is lost.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt == 0x00)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (encrypt == 0x00)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7309 
/* HCI security-confirmation callback: the authentication/encryption
 * procedure on @hcon finished. Walk every channel on the connection and
 * advance its state machine according to the outcome.
 *
 * @hcon:    link whose security state changed
 * @status:  HCI status of the procedure (0 on success)
 * @encrypt: resulting encryption state (0 = not encrypted)
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Procedure succeeded and the link is encrypted: the channel
		 * now effectively runs at the link's security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a connect request still pending are not
		 * advanced here.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established (or configuring) channels just resume data
		 * flow and re-evaluate their encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect that was waiting on security:
			 * proceed if it succeeded and the encryption key
			 * size check passes, otherwise arm the disconnect
			 * timer.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connect that was deferred pending
			 * security: answer the peer's connect request now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace still has to authorize
					 * the connection; tell the peer it
					 * is pending.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection and
				 * schedule a disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			/* scid/dcid are swapped: the response carries the
			 * IDs from the peer's point of view.
			 */
			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, start configuration immediately if we
			 * have not sent our config request yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7396 
7397 /* Append fragment into frame respecting the maximum len of rx_skb */
l2cap_recv_frag(struct l2cap_conn * conn,struct sk_buff * skb,u16 len)7398 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7399 			   u16 len)
7400 {
7401 	if (!conn->rx_skb) {
7402 		/* Allocate skb for the complete frame (with header) */
7403 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7404 		if (!conn->rx_skb)
7405 			return -ENOMEM;
7406 		/* Init rx_len */
7407 		conn->rx_len = len;
7408 
7409 		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
7410 				      skb->tstamp_type);
7411 	}
7412 
7413 	/* Copy as much as the rx_skb can hold */
7414 	len = min_t(u16, len, skb->len);
7415 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7416 	skb_pull(skb, len);
7417 	conn->rx_len -= len;
7418 
7419 	return len;
7420 }
7421 
/* Complete the 2-byte L2CAP length field of the frame being reassembled
 * in conn->rx_skb using bytes from @skb, then ensure rx_skb is large
 * enough for the full PDU it announces (reallocating it if not).
 *
 * Returns the number of bytes consumed from @skb, or a negative errno
 * on allocation failure.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	/* First two octets of the basic header carry the PDU length. */
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the old
	 * (too small) buffer is drained into the new one as a "fragment"
	 * and then freed.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7456 
l2cap_recv_reset(struct l2cap_conn * conn)7457 static void l2cap_recv_reset(struct l2cap_conn *conn)
7458 {
7459 	kfree_skb(conn->rx_skb);
7460 	conn->rx_skb = NULL;
7461 	conn->rx_len = 0;
7462 }
7463 
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7464 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7465 {
7466 	if (!c)
7467 		return NULL;
7468 
7469 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7470 
7471 	if (!kref_get_unless_zero(&c->ref))
7472 		return NULL;
7473 
7474 	return c;
7475 }
7476 
/* HCI ACL data receive path: reassemble HCI fragments into complete
 * L2CAP frames and hand them to l2cap_recv_frame().
 *
 * @hcon:  link the data arrived on
 * @skb:   one HCI ACL data packet; always consumed by this function
 *         (freed here, or ownership passed to l2cap_recv_frame)
 * @flags: ACL packet boundary flags (start vs. continuation)
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold the conn for the duration of processing; fails if it is
	 * already being torn down.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is still in progress
		 * means the previous frame was truncated: discard it and
		 * mark the connection unreliable.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		/* Total frame length = PDU length field + basic header. */
		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* A continuation with no reassembly in progress is a
		 * protocol violation.
		 */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fragment paths fall through here: the incoming skb's data has
	 * been copied (or rejected), so it is always freed. Paths that
	 * hand the skb to l2cap_recv_frame() jump straight to unlock.
	 */
drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7611 
/* Callbacks registered with the HCI core: connection setup/teardown
 * and security-procedure completion events for L2CAP.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7618 
l2cap_debugfs_show(struct seq_file * f,void * p)7619 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7620 {
7621 	struct l2cap_chan *c;
7622 
7623 	read_lock(&chan_list_lock);
7624 
7625 	list_for_each_entry(c, &chan_list, global_l) {
7626 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7627 			   &c->src, c->src_type, &c->dst, c->dst_type,
7628 			   c->state, __le16_to_cpu(c->psm),
7629 			   c->scid, c->dcid, c->imtu, c->omtu,
7630 			   c->sec_level, c->mode);
7631 	}
7632 
7633 	read_unlock(&chan_list_lock);
7634 
7635 	return 0;
7636 }
7637 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show(). */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
7641 
l2cap_init(void)7642 int __init l2cap_init(void)
7643 {
7644 	int err;
7645 
7646 	err = l2cap_init_sockets();
7647 	if (err < 0)
7648 		return err;
7649 
7650 	hci_register_cb(&l2cap_cb);
7651 
7652 	if (IS_ERR_OR_NULL(bt_debugfs))
7653 		return 0;
7654 
7655 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7656 					    NULL, &l2cap_debugfs_fops);
7657 
7658 	return 0;
7659 }
7660 
/* Module exit: undo l2cap_init() in reverse order — debugfs entry,
 * HCI callbacks, then the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7667 
/* Module parameters (0644: readable by all, writable by root). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7673