1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type / address type pair to a BDADDR_* socket address
 * type.  Anything that is not an LE link is reported as BR/EDR; LE links
 * resolve to public or random based on the device address type.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88 /* ---- L2CAP channels ---- */
89
/* Look up a channel on @conn by its destination CID.  No reference is
 * taken; callers serialise access to conn->chan_l themselves.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid == cid)
			return chan;
	}

	return NULL;
}
101
/* Look up a channel on @conn by its source CID.  No reference is taken;
 * callers serialise access to conn->chan_l themselves.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid == cid)
			return chan;
	}

	return NULL;
}
113
/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps conn->chan_l stable while we search */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	/* NULL if not found or the channel was already being released */
	return c;
}
134
/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps conn->chan_l stable while we search */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	/* NULL if not found or the channel was already being released */
	return c;
}
155
/* Look up a channel on @conn by the signalling command identifier it is
 * waiting on.  No reference is taken; callers hold conn->chan_lock.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident == ident)
			return chan;
	}

	return NULL;
}
167
/* Search the global channel list for one bound to (@psm, @src) with a
 * matching transport: BR/EDR sockets only match BR/EDR lookups and LE
 * sockets only match LE lookups.  Callers hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &chan_list, global_l) {
		bool want_bredr = (src_type == BDADDR_BREDR);
		bool chan_bredr = (chan->src_type == BDADDR_BREDR);

		/* Skip channels on the other transport */
		if (want_bredr != chan_bredr)
			continue;

		if (chan->sport == psm && !bacmp(&chan->src, src))
			return chan;
	}

	return NULL;
}
185
/* Bind @chan to PSM @psm for source address @src, or auto-allocate a free
 * dynamic PSM when @psm is 0.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already bound
 * for this source address/transport, or -EINVAL if no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock serialises the lookup/assign pair */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* Step by 2 keeps the parity of the start value */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
230
/* Turn @chan into a fixed channel with source CID @scid.  Always
 * returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
245
l2cap_alloc_cid(struct l2cap_conn * conn)246 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
247 {
248 u16 cid, dyn_end;
249
250 if (conn->hcon->type == LE_LINK)
251 dyn_end = L2CAP_CID_LE_DYN_END;
252 else
253 dyn_end = L2CAP_CID_DYN_END;
254
255 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
256 if (!__l2cap_get_chan_by_scid(conn, cid))
257 return cid;
258 }
259
260 return 0;
261 }
262
/* Move @chan to @state and notify the channel ops with err == 0. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
271
/* Move @chan to @state and deliver @err through the state_change op. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
278
/* Report @err on @chan without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
283
/* (Re)arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
292
/* Arm the ERTM monitor timer.  The retransmission timer is cleared
 * first: only one of the two runs at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
301
/* Linear scan of @head for the skb whose ERTM tx sequence number is
 * @seq; returns NULL when no queued frame matches.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
314
315 /* ---- L2CAP sequence number lists ---- */
316
317 /* For ERTM, ordered lists of sequence numbers must be tracked for
318 * SREJ requests that are received and for frames that are to be
319 * retransmitted. These seq_list functions implement a singly-linked
320 * list in an array, where membership in the list can also be checked
321 * in constant time. Items can also be added to the tail of the list
322 * and removed from the head in constant time, without further memory
323 * allocs or frees.
324 */
325
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes "seq & mask" the slot index */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
348
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
353
/* Test whether @seq is currently a member of the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
360
/* Remove and return the sequence number at the head of the list.
 * Callers must not pop an empty list.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next entry and clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* The tail sentinel was reached: list is now empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
376
/* Empty the list.  O(capacity), but skipped entirely when the list is
 * already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
390
/* Append @seq to the tail of the list; duplicates are silently ignored
 * (membership is checked via the slot array).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	/* New tail carries the sentinel marking end-of-list */
	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
408
/* Delayed work run when a channel timer armed via __set_chan_timer()
 * expires: close the channel with a state-dependent error and drop the
 * reference the timer held.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
441
/* Allocate a new channel, link it onto the global channel list and
 * initialise its queues, lock, timers and refcount.  Returns NULL on
 * allocation failure.  The caller owns the initial reference.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
478
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last l2cap_chan_put() drops the refcount to 0.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
491
/* Take an additional reference on @c (refcount must already be > 0). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
498
/* Take a reference on @c unless it is already being destroyed.
 * Returns @c on success or NULL if the refcount had reached zero.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
508
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
516
/* Reset @chan's negotiable parameters (FCS, windows, timeouts, security
 * level) to the L2CAP defaults and mark configuration as not complete.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until negotiated, assume the remote uses our own limits */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
537
/* Initialise LE credit-based flow control state on @chan: reset SDU
 * reassembly, seed our tx credits with @tx_credits and derive MPS and
 * initial rx credits from the MTUs.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
551
/* Initialise enhanced-credit (ECRED) flow control: same as the LE
 * credit-based setup, but with the MPS floored at the spec minimum.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Re-derive rx credits for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
562
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set flow-spec
 * defaults, take a channel reference and link it onto the connection's
 * channel list.  Callers hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference released by l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
614
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
621
/* Detach @chan from its connection (if any), tear down the socket side
 * with @err, and free mode-specific queues/lists once configuration had
 * completed.  Drops the reference taken by __l2cap_chan_add().
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below only exists once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
679
/* Invoke @func(@chan, @data) on every channel of @conn whose pending
 * signalling identifier matches @id.  Uses the _safe iterator because
 * @func may unlink the channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list)
		if (chan->ident == id)
			func(chan, data);
}
690
/* Invoke @func(@chan, @data) on every channel of @conn.  Callers hold
 * conn->chan_lock; @func must not remove entries from the list.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
700
/* Locked wrapper around __l2cap_chan_list(); a NULL @conn is a no-op. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->chan_lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
713
/* Delayed work (id_addr_timer): propagate the hci_conn's current
 * destination address and type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
732
/* Reject a pending LE credit-based connection request on @chan and move
 * it to BT_DISCONN.  The result depends on whether setup was deferred
 * (authorization pending) or the PSM was simply not accepted.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	/* Reply using the ident of the request being rejected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
755
/* Reject a pending enhanced-credit connection on @chan: move it to
 * BT_DISCONN and emit the deferred ECRED connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
762
/* Reject a pending BR/EDR connection request on @chan and move it to
 * BT_DISCONN.  Deferred setup maps to a security block, otherwise the
 * PSM is reported as bad.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* In the response, scid/dcid are from the remote's perspective */
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
783
/* Close @chan with @reason, taking the action appropriate to its current
 * state: listening channels are just torn down, established ones get a
 * disconnect request, and half-open incoming ones are rejected first.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Arm a timer so a lost disconn rsp can't hang us */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers the teardown */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
834
/* Translate @chan's channel type and security level into the HCI
 * authentication requirement used for BR/EDR security.  May downgrade
 * the security level to SDP for the SDP/3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
886
/* Service level security */
/* Ensure the link meets @chan's security level: SMP for LE links,
 * HCI authentication (with the derived auth type) for BR/EDR.
 * Return value comes from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
901
/* Allocate the next signalling command identifier for @conn, cycling
 * through 1..128 under ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
923
/* Build and transmit a signalling command (@code/@ident with @len bytes
 * of @data) on @conn's signalling channel at maximum HCI priority.
 * Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
948
/* Hand a fully built data frame for @chan to the HCI layer, selecting
 * the flush behaviour from the link type and the channel's flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
971
/* Decode a 16-bit enhanced control field into @control.  ReqSeq and the
 * Final bit are common to both frame types; the remaining fields depend
 * on whether the frame-type bit marks an S-frame or an I-frame.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	bool is_sframe = enh & L2CAP_CTRL_FRAME_TYPE;

	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (is_sframe) {
		/* S-frame: supervisory; carries no SAR/TxSeq */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: data; carries no poll/supervise */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
995
/* Decode a 32-bit extended control field into @control; structure
 * mirrors __unpack_enhanced_control() with the wider field layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool is_sframe = ext & L2CAP_EXT_CTRL_FRAME_TYPE;

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (is_sframe) {
		/* S-frame: supervisory; carries no SAR/TxSeq */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: data; carries no poll/supervise */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1019
/* Read and strip the control field at the head of @skb into the skb's
 * l2cap control block, using the extended layout when the channel
 * negotiated extended control fields.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1033
__pack_extended_control(struct l2cap_ctrl * control)1034 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1035 {
1036 u32 packed;
1037
1038 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1039 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1040
1041 if (control->sframe) {
1042 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1043 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1044 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1045 } else {
1046 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1047 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1048 }
1049
1050 return packed;
1051 }
1052
__pack_enhanced_control(struct l2cap_ctrl * control)1053 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1054 {
1055 u16 packed;
1056
1057 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1058 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1059
1060 if (control->sframe) {
1061 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1062 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1063 packed |= L2CAP_CTRL_FRAME_TYPE;
1064 } else {
1065 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1066 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1067 }
1068
1069 return packed;
1070 }
1071
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1072 static inline void __pack_control(struct l2cap_chan *chan,
1073 struct l2cap_ctrl *control,
1074 struct sk_buff *skb)
1075 {
1076 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1077 put_unaligned_le32(__pack_extended_control(control),
1078 skb->data + L2CAP_HDR_SIZE);
1079 } else {
1080 put_unaligned_le16(__pack_enhanced_control(control),
1081 skb->data + L2CAP_HDR_SIZE);
1082 }
1083 }
1084
__ertm_hdr_size(struct l2cap_chan * chan)1085 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1086 {
1087 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1088 return L2CAP_EXT_HDR_SIZE;
1089 else
1090 return L2CAP_ENH_HDR_SIZE;
1091 }
1092
/* Allocate and fill a complete S-frame PDU: basic L2CAP header, the
 * already-packed @control field, and an FCS when CRC16 is in use.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers control field (+ FCS) */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far (header + control) */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1125
/* Build and transmit a single S-frame described by @control, updating the
 * acknowledgment and RNR bookkeeping implied by the frame being sent.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried by the first non-poll frame sent */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory frame advertised local busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* RR/RNR/REJ acknowledge everything up to reqseq, so the
		 * delayed-ack timer has nothing left to do.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1163
/* Send a Receiver Ready or Receiver Not Ready S-frame acknowledging the
 * current buffer_seq; RNR is chosen while the local side is busy.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll = poll,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1182
__l2cap_no_conn_pending(struct l2cap_chan * chan)1183 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1184 {
1185 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1186 return true;
1187
1188 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1189 }
1190
l2cap_send_conn_req(struct l2cap_chan * chan)1191 void l2cap_send_conn_req(struct l2cap_chan *chan)
1192 {
1193 struct l2cap_conn *conn = chan->conn;
1194 struct l2cap_conn_req req;
1195
1196 req.scid = cpu_to_le16(chan->scid);
1197 req.psm = chan->psm;
1198
1199 chan->ident = l2cap_get_ident(conn);
1200
1201 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1202
1203 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1204 }
1205
l2cap_chan_ready(struct l2cap_chan * chan)1206 static void l2cap_chan_ready(struct l2cap_chan *chan)
1207 {
1208 /* The channel may have already been flagged as connected in
1209 * case of receiving data before the L2CAP info req/rsp
1210 * procedure is complete.
1211 */
1212 if (chan->state == BT_CONNECTED)
1213 return;
1214
1215 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1216 chan->conf_state = 0;
1217 __clear_chan_timer(chan);
1218
1219 switch (chan->mode) {
1220 case L2CAP_MODE_LE_FLOWCTL:
1221 case L2CAP_MODE_EXT_FLOWCTL:
1222 if (!chan->tx_credits)
1223 chan->ops->suspend(chan);
1224 break;
1225 }
1226
1227 chan->state = BT_CONNECTED;
1228
1229 chan->ops->ready(chan);
1230 }
1231
/* Send an LE credit based connection request for @chan, initializing the
 * local receive-side flow control state first. Guarded so the request is
 * sent at most once per channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the MTU to the link MTU if the user did not set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1257
/* Scratch state used while aggregating deferred channels into a single
 * enhanced credit based (ECRED) connection request PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* at most 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID; only matching chans join */
	int count;			/* number of scid[] slots filled */
};
1267
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1268 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1269 {
1270 struct l2cap_ecred_conn_data *conn = data;
1271 struct pid *pid;
1272
1273 if (chan == conn->chan)
1274 return;
1275
1276 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1277 return;
1278
1279 pid = chan->ops->get_peer_pid(chan);
1280
1281 /* Only add deferred channels with the same PID/PSM */
1282 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1283 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1284 return;
1285
1286 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1287 return;
1288
1289 l2cap_ecred_init(chan, 0);
1290
1291 /* Set the same ident so we can match on the rsp */
1292 chan->ident = conn->chan->ident;
1293
1294 /* Include all channels deferred */
1295 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1296
1297 conn->count++;
1298 }
1299
/* Send an enhanced credit based connection request for @chan, batching in
 * any sibling channels that were deferred with the same PID/PSM so they
 * share one request PDU (and one ident for response matching).
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by the initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect matching deferred channels into data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many CIDs were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1332
l2cap_le_start(struct l2cap_chan * chan)1333 static void l2cap_le_start(struct l2cap_chan *chan)
1334 {
1335 struct l2cap_conn *conn = chan->conn;
1336
1337 if (!smp_conn_security(conn->hcon, chan->sec_level))
1338 return;
1339
1340 if (!chan->psm) {
1341 l2cap_chan_ready(chan);
1342 return;
1343 }
1344
1345 if (chan->state == BT_CONNECT) {
1346 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1347 l2cap_ecred_connect(chan);
1348 else
1349 l2cap_le_connect(chan);
1350 }
1351 }
1352
l2cap_start_connection(struct l2cap_chan * chan)1353 static void l2cap_start_connection(struct l2cap_chan *chan)
1354 {
1355 if (chan->conn->hcon->type == LE_LINK) {
1356 l2cap_le_start(chan);
1357 } else {
1358 l2cap_send_conn_req(chan);
1359 }
1360 }
1361
/* Start the information request procedure (feature mask query) on @conn,
 * unless one has already been initiated.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* l2cap_info_timeout() completes the procedure if no rsp arrives */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1379
l2cap_check_enc_key_size(struct hci_conn * hcon)1380 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1381 {
1382 /* The minimum encryption key size needs to be enforced by the
1383 * host stack before establishing any L2CAP connections. The
1384 * specification in theory allows a minimum of 1, but to align
1385 * BR/EDR and LE transports, a minimum of 7 is chosen.
1386 *
1387 * This check might also be called for unencrypted connections
1388 * that have no key size requirements. Ensure that the link is
1389 * actually encrypted before enforcing a key size.
1390 */
1391 int min_key_size = hcon->hdev->min_enc_key_size;
1392
1393 /* On FIPS security level, key size must be 16 bytes */
1394 if (hcon->sec_level == BT_SECURITY_FIPS)
1395 min_key_size = 16;
1396
1397 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1398 hcon->enc_key_size >= min_key_size);
1399 }
1400
/* Drive channel establishment on @chan: LE links go straight to the LE
 * path; BR/EDR links first complete the info req/rsp procedure and the
 * security/key-size checks before sending the connect request.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask must be known first; l2cap_conn_start() resumes
	 * pending channels once the info response (or timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Too-short encryption keys get a disconnect timer instead */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1427
/* Non-zero when @mode (ERTM or streaming) is supported by both the local
 * feature mask and the remote @feat_mask; 0 for any other mode.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	u32 feature;

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		feature = L2CAP_FEAT_ERTM;
		break;
	case L2CAP_MODE_STREAMING:
		feature = L2CAP_FEAT_STREAMING;
		break;
	default:
		return 0x00;
	}

	return feature & feat_mask & local_feat_mask;
}
1443
/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * @err as the channel error. A no-op if the channel has no connection.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1465
/* ---- L2CAP connections ---- */
/* Resume channel establishment on @conn after the info req/rsp procedure
 * (or its timeout) completed: BT_CONNECT channels get their connect
 * request sent, BT_CONNECT2 channels get the pending connect response.
 * Runs with conn->chan_lock held and each channel locked in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() may remove entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose required mode the remote
			 * does not support and that may not fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration starts only after a success rsp,
			 * and only once per channel.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1546
/* LE link came up: trigger pending security and, on the peripheral role,
 * request a connection parameter update if the current interval falls
 * outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1579
/* The underlying HCI connection is up: start establishment on every
 * channel attached to @conn and release any RX frames that were queued
 * while the connection was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR links start with the feature mask query */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels become ready once features known */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the conn was fully set up */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1615
1616 /* Notify sockets that we cannot guaranty reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1617 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1618 {
1619 struct l2cap_chan *chan;
1620
1621 BT_DBG("conn %p", conn);
1622
1623 mutex_lock(&conn->chan_lock);
1624
1625 list_for_each_entry(chan, &conn->chan_l, list) {
1626 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1627 l2cap_chan_set_err(chan, err);
1628 }
1629
1630 mutex_unlock(&conn->chan_lock);
1631 }
1632
/* Info req/rsp timer expired without a response: treat the procedure as
 * done (with no features learned) and resume channel establishment.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1643
1644 /*
1645 * l2cap_user
1646 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1647 * callback is called during registration. The ->remove callback is called
1648 * during unregistration.
1649 * An l2cap_user object can either be explicitly unregistered or when the
1650 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1651 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1652 * External modules must own a reference to the l2cap_conn object if they intend
1653 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1654 * any time if they don't.
1655 */
1656
/* Register @user on @conn, invoking its ->probe() callback.
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection has already been torn down, or the ->probe() error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1694
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1695 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1696 {
1697 struct hci_dev *hdev = conn->hcon->hdev;
1698
1699 hci_dev_lock(hdev);
1700
1701 if (list_empty(&user->list))
1702 goto out_unlock;
1703
1704 list_del_init(&user->list);
1705 user->remove(conn, user);
1706
1707 out_unlock:
1708 hci_dev_unlock(hdev);
1709 }
1710 EXPORT_SYMBOL(l2cap_unregister_user);
1711
/* Tear down every registered l2cap_user on @conn.
 * Re-reads the list head each iteration because a ->remove() callback
 * may itself unregister further users.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1722
/* Tear down the L2CAP connection attached to @hcon: flush pending RX,
 * unregister users, close every channel with error @err, and drop the
 * hci_chan and the conn reference. Caller holds the hci_conn/hci_dev
 * locks (see the comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra hold so ->close() can't free the chan under us */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1777
/* kref release callback: drop the hci_conn reference and free the conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1785
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1792
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1798
1799 /* ---- Socket interface ---- */
1800
/* Find a channel listening on @psm for the given source/destination
 * addresses and link type. Prefers an exact address match; otherwise
 * returns the closest wildcard (BDADDR_ANY) match. The returned channel
 * holds a reference the caller must drop; NULL if none found.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Source address type must match the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Channel may be mid-teardown; only return
				 * it if its refcount is still live.
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1854
/* ERTM monitor timer expired: feed the event into the TX state machine.
 * The timer held a channel reference; drop it on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1875
/* ERTM retransmission timer expired: feed the event into the TX state
 * machine. The timer held a channel reference; drop it on every path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1895
/* Transmit @skbs in streaming mode: number each I-frame, append the FCS
 * when enabled, and send immediately with no retransmission state kept.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode acknowledges nothing */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1931
/* Transmit as many queued I-frames as the remote TX window permits in
 * ERTM mode. Frames stay on tx_q for possible retransmission; clones are
 * sent. Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled RNR: hold transmission until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on the outgoing I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original stays queued for
		 * retransmission until acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1998
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list, updating the control field and FCS in a fresh
 * (writable) copy of each frame. Disconnects when max_tx is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote signalled RNR: hold retransmission until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh reqseq/final for the current receive state */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		/* The retransmitted frame carried an up-to-date ack */
		chan->last_acked_seq = chan->buffer_seq;
	}
}
2076
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2085
/* Retransmit every unacknowledged frame starting at control->reqseq
 * (typically in response to a REJ or a poll from the remote side).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands an F-bit on the next frame we send */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the frame with txseq == reqseq (or to
		 * tx_send_head if reqseq is not found before it).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2119
/* Acknowledge received I-frames: send RNR while locally busy, piggyback
 * acks on outgoing I-frames when possible, send an explicit RR once the
 * unacked window is 3/4 full, and otherwise arm the delayed-ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets the delayed-ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2169
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes go into the already-allocated @skb; the rest
 * is split into continuation fragments of at most conn->mtu bytes,
 * chained on skb's frag_list.  Returns the number of bytes consumed
 * or a negative error.  On error the caller frees @skb, which also
 * releases any fragments already linked into frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a copy failure is cleaned up
		 * when the caller frees the parent skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment's bytes on the parent skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2213
/* Build a connectionless (G-frame) PDU: L2CAP header, the channel's
 * PSM, then @len bytes of user data from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	int hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int count, err;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment carries the header; cap payload at the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* L2CAP header followed by the connectionless PSM */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2245
/* Build a basic-mode B-frame PDU: plain L2CAP header followed by
 * @len bytes of user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int count, err;

	BT_DBG("chan %p len %zu", chan, len);

	/* First fragment holds the header plus as much payload as fits */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Basic-mode header: destination CID and payload length only */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2275
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (populated when the frame is actually sent), an optional SDU
 * length field (only on the first segment, when @sdulen is non-zero),
 * then @len bytes of user data.  FCS space is accounted for in the
 * header length; the FCS itself is appended later, outside this
 * function.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced or extended control field size, per channel flags */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header; len covers control/SDULEN/FCS overhead too */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2329
/* Segment an SDU from @msg into ERTM/streaming PDUs on @seg_queue.
 *
 * Each PDU is tagged with its SAR role: a single UNSEGMENTED frame if
 * the SDU fits in one PDU, otherwise START (carrying the SDU length),
 * zero or more CONTINUE frames, and a final END frame.  On error the
 * partially built queue is purged and a negative error is returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2395
/* Build one LE flow-control (credit-based) PDU: L2CAP header, the
 * 2-byte SDU length on the first segment only (@sdulen non-zero),
 * then @len bytes of user data from @msg.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int hlen, count, err;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Only the first PDU of an SDU carries the SDU length field */
	hlen = L2CAP_HDR_SIZE;
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* L2CAP header; len also covers the optional SDULEN field */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2438
/* Segment an SDU from @msg into LE flow-control PDUs on @seg_queue.
 *
 * The first PDU carries the total SDU length and therefore has
 * L2CAP_SDULEN_SIZE less payload room than the following PDUs.  On
 * error the partially built queue is purged and a negative error is
 * returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: reserve room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU: no SDULEN field, so payload room
		 * grows back by L2CAP_SDULEN_SIZE.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2474
l2cap_le_flowctl_send(struct l2cap_chan * chan)2475 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2476 {
2477 int sent = 0;
2478
2479 BT_DBG("chan %p", chan);
2480
2481 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2482 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2483 chan->tx_credits--;
2484 sent++;
2485 }
2486
2487 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2488 skb_queue_len(&chan->tx_q));
2489 }
2490
/**
 * l2cap_chan_send - transmit an SDU on an L2CAP channel
 * @chan: channel to send on
 * @msg:  user data to copy from
 * @len:  SDU length in bytes
 *
 * Dispatches on channel type and mode: connectionless channels and
 * basic mode send a single PDU directly; LE/extended flow control and
 * ERTM/streaming segment the SDU first and hand the segments to the
 * relevant transmit machinery.  Returns @len on success or a negative
 * error code.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel may have been closed while segmenting;
		 * discard whatever was queued in that case.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the upper layer to stop queueing */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2593 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2594
/* Send SREJ S-frames for every sequence number between expected_tx_seq
 * and @txseq (exclusive) that is not already buffered out-of-order in
 * srej_q, recording each requested seq on srej_list.  Finally advances
 * expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Already have this frame buffered? Then no SREJ needed */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2617
l2cap_send_srej_tail(struct l2cap_chan * chan)2618 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2619 {
2620 struct l2cap_ctrl control;
2621
2622 BT_DBG("chan %p", chan);
2623
2624 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2625 return;
2626
2627 memset(&control, 0, sizeof(control));
2628 control.sframe = 1;
2629 control.super = L2CAP_SUPER_SREJ;
2630 control.reqseq = chan->srej_list.tail;
2631 l2cap_send_sframe(chan, &control);
2632 }
2633
/* Re-send SREJs for sequence numbers still pending on srej_list.
 *
 * Entries are popped, re-requested and re-appended, rotating the list
 * at most one full pass; the entry matching @txseq (now received) is
 * dropped rather than re-appended.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop when the received seq is reached or the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2659
/* Process an acknowledgment from the peer: free every transmitted
 * frame with txseq from expected_ack_seq up to (but not including)
 * @reqseq, and stop the retransmission timer once nothing remains
 * unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing newly acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2691
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to
 * buffer_seq, forget pending SREJ requests, discard the out-of-order
 * frames buffered in srej_q, and return to plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2701
/* ERTM TX state machine, XMIT (normal transmission) state handler.
 *
 * New data is queued and sent immediately; local-busy transitions
 * send the appropriate RNR/RR; explicit polls and retransmission
 * timeouts send a poll and move the machine to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out earlier; poll the peer so it
			 * learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2773
/* ERTM TX state machine, WAIT_F state handler: a poll (P=1) has been
 * sent and we are waiting for the peer's final (F=1) response.  New
 * data is queued but not transmitted until the F-bit arrives; monitor
 * timeouts re-poll up to max_tx times before disconnecting.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out earlier; poll the peer so it
			 * learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: stop monitoring, re-arm the
			 * retransmission timer if frames are still
			 * outstanding, and resume normal transmission.
			 */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit yet: re-poll until max_tx is exhausted (0 means
		 * retry forever), then give up and disconnect.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2850
/* Dispatch a TX event to the handler for the channel's current TX
 * state.  Events arriving in any other state are silently ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: ignore the event */
}
2869
/* Feed a received frame's reqseq and F-bit into the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2876
/* Feed only a received frame's F-bit into the TX state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2883
2884 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2885 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2886 {
2887 struct sk_buff *nskb;
2888 struct l2cap_chan *chan;
2889
2890 BT_DBG("conn %p", conn);
2891
2892 mutex_lock(&conn->chan_lock);
2893
2894 list_for_each_entry(chan, &conn->chan_l, list) {
2895 if (chan->chan_type != L2CAP_CHAN_RAW)
2896 continue;
2897
2898 /* Don't send frame to the channel it came from */
2899 if (bt_cb(skb)->l2cap.chan == chan)
2900 continue;
2901
2902 nskb = skb_clone(skb, GFP_KERNEL);
2903 if (!nskb)
2904 continue;
2905 if (chan->ops->recv(chan, nskb))
2906 kfree_skb(nskb);
2907 }
2908
2909 mutex_unlock(&conn->chan_lock);
2910 }
2911
2912 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + @dlen bytes
 * of @data.  Data that does not fit in the first HCI fragment
 * (conn->mtu) is chained as continuation fragments on frag_list.
 * Returns NULL on allocation failure or when the MTU cannot hold even
 * the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the chained fragments along with the parent skb */
	kfree_skb(skb);
	return NULL;
}
2978
/* Parse one configuration option at *ptr, advancing *ptr past it.
 *
 * Returns the total bytes consumed.  1/2/4-byte option values are
 * returned by value in *val (little-endian on the wire); any other
 * length yields a pointer to the option payload instead.
 *
 * NOTE(review): opt->len comes from the peer and is not bounds-checked
 * here — callers must ensure the option fits in the remaining buffer;
 * confirm at the call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3012
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * 1/2/4-byte values are stored little-endian; other lengths treat
 * @val as a pointer to @len bytes to copy.  If the option does not
 * fit within the remaining @size bytes it is silently dropped and
 * *ptr is left unchanged.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Buffer overflow guard: drop options that would not fit */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3045
/* Append an Extended Flow Specification option for ERTM or streaming
 * mode; other modes emit nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	/* MSDU size and interarrival time come from the channel in both
	 * supported modes; the remaining fields are mode-specific.
	 */
	efs.msdu = cpu_to_le16(chan->local_msdu);
	efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* EFS only applies to ERTM and streaming */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3076
/* Delayed-ack timer work: if received frames are still unacknowledged
 * when the timer fires, send an RR/RNR S-frame to acknowledge them.
 * The final l2cap_chan_put() drops the reference held for this work —
 * presumably taken when the timer was armed; confirm at the arm site.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3096
/* Reset per-channel transmit/receive bookkeeping and, for ERTM mode,
 * allocate the SREJ and retransmit sequence lists.  Returns 0 on
 * success or a negative error; on partial failure the already
 * allocated srej_list is freed again.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming and other modes need no further state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3132
/* Pick the channel mode to use: ERTM and streaming require remote
 * support; any other requested mode (or an unsupported one) falls
 * back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3145
__l2cap_ews_supported(struct l2cap_conn * conn)3146 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3147 {
3148 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3149 }
3150
__l2cap_efs_supported(struct l2cap_conn * conn)3151 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3152 {
3153 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3154 }
3155
/* Fill in the default retransmission and monitor timeouts of an ERTM
 * RFC option, converted to little-endian for the wire.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3162
l2cap_txwin_setup(struct l2cap_chan * chan)3163 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3164 {
3165 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3166 __l2cap_ews_supported(chan->conn)) {
3167 /* use extended control field */
3168 set_bit(FLAG_EXT_CTRL, &chan->flags);
3169 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3170 } else {
3171 chan->tx_win = min_t(u16, chan->tx_win,
3172 L2CAP_DEFAULT_TX_WINDOW);
3173 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3174 }
3175 chan->ack_win = chan->tx_win;
3176 }
3177
l2cap_mtu_auto(struct l2cap_chan * chan)3178 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3179 {
3180 struct hci_conn *conn = chan->conn->hcon;
3181
3182 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3183
3184 /* The 2-DH1 packet has between 2 and 56 information bytes
3185 * (including the 2-byte payload header)
3186 */
3187 if (!(conn->pkt_type & HCI_2DH1))
3188 chan->imtu = 54;
3189
3190 /* The 3-DH1 packet has between 2 and 85 information bytes
3191 * (including the 2-byte payload header)
3192 */
3193 if (!(conn->pkt_type & HCI_3DH1))
3194 chan->imtu = 83;
3195
3196 /* The 2-DH3 packet has between 2 and 369 information bytes
3197 * (including the 2-byte payload header)
3198 */
3199 if (!(conn->pkt_type & HCI_2DH3))
3200 chan->imtu = 367;
3201
3202 /* The 3-DH3 packet has between 2 and 554 information bytes
3203 * (including the 2-byte payload header)
3204 */
3205 if (!(conn->pkt_type & HCI_3DH3))
3206 chan->imtu = 552;
3207
3208 /* The 2-DH5 packet has between 2 and 681 information bytes
3209 * (including the 2-byte payload header)
3210 */
3211 if (!(conn->pkt_type & HCI_2DH5))
3212 chan->imtu = 679;
3213
3214 /* The 3-DH5 packet has between 2 and 1023 information bytes
3215 * (including the 2-byte payload header)
3216 */
3217 if (!(conn->pkt_type & HCI_3DH5))
3218 chan->imtu = 1021;
3219 }
3220
/* Build a Configure Request for @chan into @data (bounded by @data_size)
 * and return the number of bytes written.
 *
 * Before the first config PDU has been exchanged this also (re)selects
 * the channel mode from the remote feature mask; it then appends the
 * MTU, RFC, EFS, EWS and FCS options appropriate for the chosen mode.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;	/* bound for option writes */
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only negotiated before any config request/response has
	 * been exchanged; afterwards keep what was already agreed.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: the mode was mandated locally, so do
		 * not fall back based on the remote feature mask.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode supported by the remote feature mask */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only emit an MTU option when it differs from the default; a
	 * zero MTU is auto-sized from the supported ACL packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* An explicit basic-mode RFC option is only needed when the
		 * remote advertises ERTM/streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a frame plus extended header, SDU
		 * length and FCS still fits in the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* A window larger than the RFC field allows rides in a
		 * separate extended-window-size option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3346
/* Parse the complete Configure Request accumulated in chan->conf_req
 * (chan->conf_len bytes) and build the matching Configure Response into
 * @data (bounded by @data_size).
 *
 * Returns the response length, or -ECONNREFUSED when the request cannot
 * be honoured and the connection must be torn down.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's options. Options with a bad
	 * length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here */
			return -ECONNREFUSED;

		default:
			/* Unknown non-hint options are echoed back in an
			 * UNKNOWN response; hints are simply ignored.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A locally mandated mode must match the remote's RFC */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* The remote already got one UNACCEPT with our mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Mismatched (and non-NoTraffic) service types are
			 * unacceptable.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3566
/* Parse a Configure Response in @rsp (@len option bytes) and build the
 * follow-up Configure Request into @data (bounded by @size).
 *
 * *result may be downgraded (e.g. to UNACCEPT for a too-small MTU).
 * Returns the request length, or -ECONNREFUSED when the response is not
 * acceptable (mode or service-type conflict).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the options, adopting the remote's values and echoing them
	 * into the new request. Badly sized options are skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A locally mandated mode cannot be renegotiated */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Refuse mismatched (non-NoTraffic) service types */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be switched by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window applies */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3684
/* Fill in a bare Configure Response header (no options) for @chan and
 * return the number of bytes written to @data.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *conf_rsp = data;

	BT_DBG("chan %p", chan);

	conf_rsp->flags = cpu_to_le16(flags);
	conf_rsp->result = cpu_to_le16(result);
	conf_rsp->scid = cpu_to_le16(chan->dcid);

	return conf_rsp->data - data;
}
3699
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3700 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3701 {
3702 struct l2cap_le_conn_rsp rsp;
3703 struct l2cap_conn *conn = chan->conn;
3704
3705 BT_DBG("chan %p", chan);
3706
3707 rsp.dcid = cpu_to_le16(chan->scid);
3708 rsp.mtu = cpu_to_le16(chan->imtu);
3709 rsp.mps = cpu_to_le16(chan->mps);
3710 rsp.credits = cpu_to_le16(chan->rx_credits);
3711 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3712
3713 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3714 &rsp);
3715 }
3716
/* Per-channel callback used to decide whether a deferred ECRED response
 * can be sent yet: counts channels still pending accept in *data, or
 * latches -ECONNREFUSED when a channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once refused, or skip channels we initiated ourselves */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Still waiting for the user to accept */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: refused */
		*result = -ECONNREFUSED;
	}
}
3737
/* Scratch state for building one Enhanced Credit Based Flow Control
 * connection response that covers several channels at once.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		/* Backing storage for the DCID list written via rsp.dcid[] */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of DCIDs filled in so far */
};
3745
l2cap_ecred_rsp_defer(struct l2cap_chan * chan,void * data)3746 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3747 {
3748 struct l2cap_ecred_rsp_data *rsp = data;
3749
3750 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3751 return;
3752
3753 /* Reset ident so only one response is sent */
3754 chan->ident = 0;
3755
3756 /* Include all channels pending with the same ident */
3757 if (!rsp->pdu.rsp.result)
3758 rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3759 else
3760 l2cap_chan_del(chan, ECONNRESET);
3761 }
3762
/* Send the deferred ECRED connection response covering every channel
 * that shares @chan's command ident, once all of them have been either
 * accepted or refused.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* No ident means no response is outstanding */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* Some channel is still pending accept: respond later */
	if (result > 0)
		return;

	/* At least one channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3798
__l2cap_connect_rsp_defer(struct l2cap_chan * chan)3799 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3800 {
3801 struct l2cap_conn_rsp rsp;
3802 struct l2cap_conn *conn = chan->conn;
3803 u8 buf[128];
3804 u8 rsp_code;
3805
3806 rsp.scid = cpu_to_le16(chan->dcid);
3807 rsp.dcid = cpu_to_le16(chan->scid);
3808 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3809 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3810 rsp_code = L2CAP_CONN_RSP;
3811
3812 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3813
3814 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3815
3816 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3817 return;
3818
3819 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3820 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3821 chan->num_conf_req++;
3822 }
3823
/* Extract the final RFC (and extended window) settings from a successful
 * Configure Response @rsp of @len option bytes and apply them to @chan.
 * Only meaningful for ERTM and streaming channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick up RFC and EWS options; badly sized ones are skipped */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS value wins over the RFC one */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3879
/* Handle an incoming Command Reject. If it answers our outstanding
 * information request, finish the feature exchange and start pending
 * connections.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only act when this rejects our in-flight info request */
	if (cmd->ident != conn->info_ident ||
	    !(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT))
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
3904
/* Handle an incoming BR/EDR Connection Request: look up a listening
 * channel for the PSM, validate security and the requested CID, create
 * a new channel and send the response with code @rsp_code.
 *
 * Returns the new channel, or NULL when the request was rejected before
 * a channel could be created.
 *
 * NOTE(review): @amp_id selects a forced-success result when it equals
 * AMP_ID_BREDR (see below); the only visible caller passes 0.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	/* result stays L2CAP_CR_NO_MEM when channel creation fails */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident for a deferred response */
	chan->ident = cmd->ident;

	/* Decide result/status: proceed only once the feature-mask
	 * exchange is done and the link passed the security check.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the information request exchange if not done yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4046
/* Handle an L2CAP Connection Request PDU: notify the management layer
 * of the connection once, then hand off to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	/* Tell mgmt about the connection exactly once */
	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4065
/* Handle a Connection Response PDU: locate the matching channel (by our
 * source CID, or by command ident while still pending), then move it to
 * BT_CONFIG and start configuration on success, mark it pending, or
 * delete it on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No source CID yet: match by the request's ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a DCID that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success starts configuration */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4154
set_default_fcs(struct l2cap_chan * chan)4155 static inline void set_default_fcs(struct l2cap_chan *chan)
4156 {
4157 /* FCS is enabled only in ERTM or streaming mode, if one or both
4158 * sides request it.
4159 */
4160 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4161 chan->fcs = L2CAP_FCS_NONE;
4162 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4163 chan->fcs = L2CAP_FCS_CRC16;
4164 }
4165
/* Finish a pending EFS configuration by sending a SUCCESS Configure
 * Response with the given @flags, using @data as scratch space.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int rsp_len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local config is no longer pending; output side is done */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp_len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, rsp_len, data);
}
4181
/* Send a Command Reject with reason "invalid CID" for the given
 * source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid = __cpu_to_le16(scid),
		.dcid = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4193
/* Handle a Configure Request PDU. Continuation fragments are buffered
 * in chan->conf_req; once the request is complete it is parsed, the
 * response sent, and — when both directions are configured — the
 * channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own Configure Request if we haven't yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4302
/* Handle a Configure Response PDU. Applies the accepted settings on
 * success, retries with adjusted options on UNACCEPT/UNKNOWN (bounded
 * by L2CAP_CONF_MAX_CONF_RSP), and disconnects on anything else. Once
 * both directions are configured the channel is brought up.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Adopt the final RFC/EWS settings the remote accepted */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we are also pending locally, complete the EFS
		 * exchange now.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with the remote's counter-proposal, up to the
		 * maximum number of attempts.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* Wait for the final (non-continuation) fragment */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4409
/* Handle an incoming L2CAP_DISCONN_REQ.
 *
 * Looks up the target channel by the peer's dcid (our scid), echoes an
 * L2CAP_DISCONN_RSP and tears the channel down.  An unknown CID is
 * answered with a command reject.  Returns 0, or -EPROTO for a
 * malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* On success the channel comes back locked with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Drop the channel lock before taking conn->chan_lock so the
	 * conn-lock -> chan-lock ordering is preserved, then re-take it
	 * for the actual removal from the connection.
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4452
/* Handle an incoming L2CAP_DISCONN_RSP.
 *
 * Completes a disconnect we initiated: if the channel is still in
 * BT_DISCONN state it is removed from the connection and closed.
 * Stray responses for channels not being disconnected are ignored.
 * Returns 0, or -EPROTO for a malformed PDU.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* On success the channel comes back locked with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Only act on channels we actually put into disconnecting state */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Re-acquire locks in conn-lock -> chan-lock order for removal */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4493
/* Handle an incoming L2CAP_INFO_REQ.
 *
 * Answers feature-mask and fixed-channel queries from our local
 * capabilities; any other type gets a NOTSUPP response.  Returns 0, or
 * -EPROTO for a malformed PDU.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		/* Advertise ERTM/streaming/FCS only when ERTM is enabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* First octet carries our fixed-channel map, rest is zero */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4541
/* Handle an incoming L2CAP_INFO_RSP.
 *
 * Records the peer's feature mask and fixed-channel map.  After a
 * feature-mask reply that advertises fixed channels, a follow-up
 * fixed-channel query is issued; otherwise the info exchange is marked
 * done and pending channels are started.  Returns 0 or -EPROTO.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Info req/rsp are not bound to a channel, so sanity-check the
	 * ident and the exchange state before trusting the response.
	 */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS)
		goto done;

	if (type == L2CAP_IT_FEAT_MASK) {
		struct l2cap_info_req req;

		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (!(conn->feat_mask & L2CAP_FEAT_FIXED_CHAN))
			goto done;

		/* Peer supports fixed channels: ask which ones */
		req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

		conn->info_ident = l2cap_get_ident(conn);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
		return 0;
	}

	if (type == L2CAP_IT_FIXED_CHAN) {
		conn->remote_fixed_chan = rsp->data[0];
		goto done;
	}

	/* Unknown type: leave the exchange open, nothing to record */
	return 0;

done:
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
	return 0;
}
4604
/* Handle an L2CAP_CONN_PARAM_UPDATE_REQ from an LE peripheral.
 *
 * Only valid when we are the central: validates the requested
 * connection parameters, always answers with an accept/reject
 * response, and on accept triggers the HCI connection update and
 * notifies mgmt so userspace can store the parameters.  Returns 0,
 * -EINVAL for the wrong role, or -EPROTO for a short PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Reject intervals outside the connection's current bounds.
	 * Both ends must be checked: only testing the maximum would let
	 * a peer push the interval below le_conn_min_interval.
	 */
	if (min < hcon->le_conn_min_interval ||
	    max > hcon->le_conn_max_interval) {
		BT_DBG("requested connection interval exceeds current bounds.");
		err = -EINVAL;
	} else {
		err = hci_check_conn_params(min, max, latency, to_multiplier);
	}

	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
4660
/* Handle an L2CAP_LE_CONN_RSP for an LE credit-based connection we
 * initiated.
 *
 * On success the pending channel is bound to the returned dcid and
 * made ready.  If the peer demands more security, the required level
 * is raised and the connect request will be re-sent after the security
 * upgrade; any other result deletes the channel.  Returns 0 on
 * success, -EPROTO on a short/invalid PDU, -EBADSLT when no matching
 * pending channel (or a dcid collision) exists.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimums (MTU/MPS >= 23) and a valid LE dynamic dcid are
	 * mandatory on a successful response.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is identified by the ident of our request */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid already used by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4747
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Handlers whose failure should trigger a command reject propagate
 * their error; the others are fire-and-forget.  Echo requests are
 * answered inline and echo responses are ignored.  Unknown opcodes
 * yield -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	u8 code = cmd->code;
	int err = 0;

	if (code == L2CAP_COMMAND_REJ) {
		l2cap_command_rej(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONN_REQ) {
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONN_RSP) {
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONF_REQ) {
		err = l2cap_config_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONF_RSP) {
		l2cap_config_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_DISCONN_REQ) {
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_DISCONN_RSP) {
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_ECHO_REQ) {
		/* Echo back the payload as-is */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len,
			       data);
	} else if (code == L2CAP_ECHO_RSP) {
		/* Nothing to do for an echo response */
	} else if (code == L2CAP_INFO_REQ) {
		err = l2cap_information_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_INFO_RSP) {
		l2cap_information_rsp(conn, cmd, cmd_len, data);
	} else {
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
	}

	return err;
}
4806
/* Handle an incoming L2CAP_LE_CONN_REQ (LE credit-based connection).
 *
 * Validates the request, finds a listening channel for the PSM, checks
 * security and CID validity, then creates and registers the new child
 * channel.  Unless the listening socket uses deferred setup, an
 * L2CAP_LE_CONN_RSP carrying the outcome is sent back.  Returns 0, or
 * -EPROTO for a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimums for LE credit-based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Create the child channel from the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our side of the response: local CID and initial rx credits */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later from userspace */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4945
/* Handle an incoming L2CAP_LE_CREDITS (LE flow control credit grant).
 *
 * Adds the peer's credits to the channel's tx budget and resumes any
 * transmissions that were stalled waiting for credit.  A grant that
 * would push the total past LE_FLOWCTL_MAX_CREDITS is a protocol
 * violation and disconnects the channel.  Returns 0, -EPROTO for a
 * malformed PDU, or -EBADSLT for an unknown CID.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits, room;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* How many more credits the peer may legally grant us */
	room = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > room) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4992
/* Handle an L2CAP_ECRED_CONN_REQ (enhanced credit-based connections).
 *
 * Validates the request, then tries to create one child channel per
 * requested scid from the channel listening on the PSM.  The response
 * carries one dcid slot per requested scid (0x0000 for the ones that
 * failed) plus a single result/mtu/mps/credits set.  With deferred
 * setup the response is sent later from userspace.  Returns 0, or
 * -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Zero the whole response up front: several validation failures
	 * below jump straight to the response label, and the reject PDU
	 * must not carry uninitialized stack bytes in its mtu/mps/credits
	 * fields.
	 */
	memset(&pdu, 0, sizeof(pdu));

	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* A failed slot keeps dcid 0x0000 in the response */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: filled once from the first channel that
		 * succeeds (all share the same local parameters).
		 */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Deferred setup: userspace will trigger the response later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
5148
/* Handle an L2CAP_ECRED_CONN_RSP for a connection request we sent.
 *
 * Walks every channel that is still pending on this command ident and
 * matches it with the next dcid in the response.  Channels without a
 * dcid, with a duplicate dcid, or refused outright are deleted; a
 * security-related refusal raises the required level and the request
 * will be re-sent after the upgrade.  Returns 0 or -EPROTO.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* Remaining bytes are the dcid array */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Accepted: bind the channel and make it ready */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
5262
/* Handle an L2CAP_ECRED_RECONF_REQ (peer reconfigures MTU/MPS of one
 * or more of its channels).
 *
 * Validates the new values, applies them to every referenced channel
 * and answers with an L2CAP_ECRED_RECONF_RSP carrying the overall
 * result.  Returns 0, -EINVAL when ECRED is disabled, or -EPROTO for a
 * zero scid.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The scid list after the fixed header must be whole u16s.  The
	 * subtraction needs parentheses: without them only
	 * sizeof(*req) % sizeof(u16) (== 0) would be subtracted and the
	 * check would reject every request (see the matching check in
	 * l2cap_ecred_conn_req()).
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5333
/* Handle an L2CAP_ECRED_RECONF_RSP for a reconfiguration we requested.
 *
 * A successful result needs no action; on failure every channel that
 * took part in the request (matched by command ident) is deleted.
 * Returns 0 or -EPROTO for a short PDU.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the CPU-order value; rsp->result is still little-endian */
	BT_DBG("result 0x%4.4x", result);

	if (!result)
		return 0;

	/* Reconfiguration failed: drop every participating channel */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5361
/* Handle an L2CAP_COMMAND_REJ on the LE signaling channel.
 *
 * The peer rejected one of our commands: any channel still waiting on
 * that command ident is taken down with ECONNREFUSED.  Returns 0 or
 * -EPROTO for a short PDU.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (chan)
		/* Guard against a concurrent teardown dropping the last ref */
		chan = l2cap_chan_hold_unless_zero(chan);

	if (chan) {
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, ECONNREFUSED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);
	return 0;
}
5391
/* Dispatch a single LE signaling command to its handler.
 *
 * Handlers whose failure should trigger a command reject propagate
 * their error; parameter-update responses are ignored.  Unknown
 * opcodes yield -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	u8 code = cmd->code;
	int err = 0;

	if (code == L2CAP_COMMAND_REJ) {
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONN_PARAM_UPDATE_REQ) {
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_CONN_PARAM_UPDATE_RSP) {
		/* Nothing to do for the response */
	} else if (code == L2CAP_LE_CONN_RSP) {
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_LE_CONN_REQ) {
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_LE_CREDITS) {
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_ECRED_CONN_REQ) {
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_ECRED_CONN_RSP) {
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_ECRED_RECONF_REQ) {
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_ECRED_RECONF_RSP) {
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_DISCONN_REQ) {
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
	} else if (code == L2CAP_DISCONN_RSP) {
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
	} else {
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
	}

	return err;
}
5454
/* Receive path for the LE signaling channel.
 *
 * An LE signaling frame carries exactly one command.  Frames on the
 * wrong link type or malformed frames are silently dropped; a handler
 * failure is answered with a command reject.  Consumes the skb in all
 * cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The single command must span the whole frame; ident 0 is
	 * reserved and therefore invalid.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5495
/* Send a "command not understood" reject for the given command ident. */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5503
/* Receive path for the BR/EDR signaling channel.
 *
 * A BR/EDR signaling frame may carry several commands back to back;
 * each one is parsed and dispatched in turn.  Malformed commands and
 * handler failures are answered with a command reject; trailing bytes
 * smaller than a command header get a reject with ident 0.  Consumes
 * the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Truncated payload or reserved ident 0: reject and skip
		 * however much of the bogus command is actually present.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Leftover bytes too short to hold another command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5551
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)5552 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5553 {
5554 u16 our_fcs, rcv_fcs;
5555 int hdr_size;
5556
5557 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5558 hdr_size = L2CAP_EXT_HDR_SIZE;
5559 else
5560 hdr_size = L2CAP_ENH_HDR_SIZE;
5561
5562 if (chan->fcs == L2CAP_FCS_CRC16) {
5563 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5564 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5565 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5566
5567 if (our_fcs != rcv_fcs)
5568 return -EBADMSG;
5569 }
5570 return 0;
5571 }
5572
/* Respond to a poll (P=1) from the peer: send pending I-frames and
 * make sure a frame carrying the F-bit goes out.  If we are locally
 * busy an RNR is sent first; otherwise the F-bit rides on an I-frame
 * if l2cap_ertm_send() sent one, or on a trailing RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Mark that an F-bit must be transmitted; cleared by whichever
	 * frame actually carries it.
	 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just polled us, so it is no longer busy; restart the
	 * retransmission timer if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5606
/* Append @new_frag to the frag_list of head skb @skb, keeping
 * *last_frag pointing at the current tail so successive appends are
 * O(1), and update the head's length/truesize accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* Link behind the previous tail (the head skb itself on the
	 * first append) and advance the tail pointer.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5625
/* Reassemble segmented SDUs from ERTM/streaming I-frames according to
 * the frame's SAR field.  On success this function takes ownership of
 * @skb (either delivered via chan->ops->recv or stored in chan->sdu);
 * on error both @skb and any partial SDU are freed.
 *
 * Returns 0 on success, -EINVAL on a SAR sequencing violation,
 * -EMSGSIZE when the announced SDU length exceeds the channel MTU, or
 * the error from chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU while one is in progress is a protocol
		 * violation -> fall out with -EINVAL.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* Starting a new SDU while one is in progress is invalid */
		if (chan->sdu)
			break;

		/* First segment must carry the 2-byte SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		/* Keep the skb as the reassembly head; it is now owned by
		 * the channel.
		 */
		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start segment is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End segment without a start segment is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* After the end segment the SDU must be exactly complete */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard both the offending frame (if not already
		 * consumed) and any partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5710
/* Re-segmentation of queued PDUs after an MTU/MPS change is not yet
 * implemented; unconditionally report success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5716
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5717 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5718 {
5719 u8 event;
5720
5721 if (chan->mode != L2CAP_MODE_ERTM)
5722 return;
5723
5724 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5725 l2cap_tx(chan, NULL, NULL, event);
5726 }
5727
/* Drain frames that were buffered in srej_q while SREJ recovery was in
 * progress: deliver them to reassembly in sequence-number order until a
 * gap (missing frame) is found or local busy is asserted.  When the
 * queue empties, recovery is complete and the receiver returns to the
 * normal RECV state.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found - stop and wait for the retransmission */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5761
/* Handle a received SREJ S-frame: the peer selectively requests
 * retransmission of the single frame with sequence number
 * control->reqseq.  Invalid requests (frame never sent) or exceeding
 * the retry limit tear the connection down.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would ask for a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: retransmit the requested frame, resume sending and
		 * remember the reqseq so the later F=1 response can be
		 * matched (CONN_SREJ_ACT).
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only skip the retransmit if this F=1 answers the
			 * SREJ we already acted on for the same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5819
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all frames starting at control->reqseq.  Invalid reqseq or exceeding
 * the retry limit tears the connection down.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: retransmit only if this REJ wasn't already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		/* Remember the REJ so the matching F=1 reply isn't
		 * retransmitted a second time.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5856
/* Classify a received I-frame's txseq relative to the receive window
 * and any ongoing SREJ recovery.  The result drives the RX state
 * machines: expected, duplicate, unexpected (gap -> SREJ), or invalid
 * (ignore or disconnect depending on window size).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (mod window) is a retransmission
	 * of a frame already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5942
/* ERTM receiver state machine handler for the normal RECV state.
 * Processes incoming I-frames and S-frame events, transitioning to
 * SREJ_SENT on a sequence gap.  @skb is consumed (queued or delivered)
 * when skb_in_use is set; otherwise it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * the peer will retransmit once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the piggy-backed ack info
			 * is of interest.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if the F=1 doesn't answer a REJ
			 * we already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frames not queued or delivered above are dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6094
/* ERTM receiver state machine handler for the SREJ_SENT state, entered
 * after a sequence gap.  Incoming frames are buffered in srej_q until
 * the missing frames arrive; l2cap_rx_queued_iframes() drains the
 * buffer in order and returns the receiver to RECV when complete.
 * @skb is consumed when skb_in_use is set; otherwise freed on return.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; buffer it and try
			 * to drain any now-sequential frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if the F=1 doesn't answer a REJ
			 * we already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F=1 SREJ for the newest
			 * outstanding gap.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames not queued above are dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6237
l2cap_finish_move(struct l2cap_chan * chan)6238 static int l2cap_finish_move(struct l2cap_chan *chan)
6239 {
6240 BT_DBG("chan %p", chan);
6241
6242 chan->rx_state = L2CAP_RX_STATE_RECV;
6243 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6244
6245 return l2cap_resegment(chan);
6246 }
6247
/* Receiver state handler for WAIT_P (during a channel move): wait for
 * the peer's P=1 frame, rewind the transmit sequence to what the peer
 * expects, finish the move and answer the poll with an F-bit frame.
 * Only S-frame events are valid here; an I-frame is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Nothing but a P=1 frame may advance this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not acceptable while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-dispatch the S-frame event through the normal RECV state */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6285
/* Receiver state handler for WAIT_F (during a channel move): wait for
 * the peer's F=1 response, rewind the transmit sequence to what the
 * peer expects, refresh the MTU, resegment and resume normal RECV
 * processing of the frame.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only an F=1 frame may advance this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6319
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6320 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6321 {
6322 /* Make sure reqseq is for a packet that has been sent but not acked */
6323 u16 unacked;
6324
6325 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6326 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6327 }
6328
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * hand the event to the handler for the channel's current RX state.
 * An invalid reqseq tears the connection down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return 0;
	}

	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		return l2cap_rx_state_recv(chan, control, skb, event);
	case L2CAP_RX_STATE_SREJ_SENT:
		return l2cap_rx_state_srej_sent(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_P:
		return l2cap_rx_state_wait_p(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_F:
		return l2cap_rx_state_wait_f(chan, control, skb, event);
	default:
		/* shut it down */
		return 0;
	}
}
6365
/* Streaming-mode I-frame receive path: deliver in-sequence frames to
 * reassembly and silently drop out-of-sequence ones (streaming mode
 * has no retransmission), discarding any partial SDU in that case.
 * Always returns 0; errors are absorbed since there is no recovery.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap or duplicate: a partial SDU can never be
		 * completed now, so drop it along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the received frame regardless of outcome */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6417
/* Entry point for ERTM/streaming data frames on a channel: verify the
 * FCS, validate payload length and F/P bit combinations, then route
 * I-frames and S-frames into the RX state machine (or the streaming
 * path).  Owns @skb on all paths; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	/* Captured before l2cap_check_fcs() trims the FCS trailer; the
	 * explicit subtractions below account for that.
	 */
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Reduce to pure payload length for the MPS check */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Optional per-channel filter hook may veto the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6510
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6511 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6512 {
6513 struct l2cap_conn *conn = chan->conn;
6514 struct l2cap_le_credits pkt;
6515 u16 return_credits;
6516
6517 return_credits = (chan->imtu / chan->mps) + 1;
6518
6519 if (chan->rx_credits >= return_credits)
6520 return;
6521
6522 return_credits -= chan->rx_credits;
6523
6524 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6525
6526 chan->rx_credits += return_credits;
6527
6528 pkt.cid = cpu_to_le16(chan->scid);
6529 pkt.credits = cpu_to_le16(return_credits);
6530
6531 chan->ident = l2cap_get_ident(conn);
6532
6533 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6534 }
6535
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)6536 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6537 {
6538 int err;
6539
6540 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6541
6542 /* Wait recv to confirm reception before updating the credits */
6543 err = chan->ops->recv(chan, skb);
6544
6545 /* Update credits whenever an SDU is received */
6546 l2cap_chan_le_send_credits(chan);
6547
6548 return err;
6549 }
6550
/* Receive one LE/ECRED flow-control PDU: account a credit, start or
 * continue SDU reassembly, and deliver the SDU once complete.  Owns
 * @skb on all paths.  Note the unusual contract: reassembly errors are
 * handled (and skbs freed) internally, so 0 is returned for them; a
 * non-zero return only signals that the caller still owns @skb
 * (credit exhaustion or oversized PDU).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding a credit: protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* Each PDU consumes exactly one credit */
	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the 2-byte SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Entire SDU fit in one PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Keep the skb as the reassembly head */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6654
/* Route an incoming data frame to the channel identified by @cid and
 * dispatch it per the channel's mode.  l2cap_get_chan_by_scid() returns
 * the channel locked and referenced; both are released via done: on
 * every path.  Always consumes @skb.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Negative return means l2cap_ecred_data_rcv() did NOT
		 * consume the skb; free it via drop.
		 */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6720
/* Deliver a connectionless (G-frame) packet to a matching raw channel.
 * The skb is consumed: either passed to the channel's recv op or freed
 * here.  Only ACL links carry connectionless traffic.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan = NULL;
	bool consumed = false;

	if (hcon->type == ACL_LINK)
		chan = l2cap_global_chan_by_psm(0, psm, &hcon->src,
						&hcon->dst, ACL_LINK);
	if (!chan)
		goto out;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if ((chan->state == BT_BOUND || chan->state == BT_CONNECTED) &&
	    skb->len <= chan->imtu) {
		/* Store remote BD_ADDR and PSM for msg_name */
		bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
		bt_cb(skb)->l2cap.psm = psm;

		/* recv returning 0 means it took ownership of the skb */
		consumed = !chan->ops->recv(chan, skb);
	}

	l2cap_chan_put(chan);
out:
	if (!consumed)
		kfree_skb(skb);
}
6757
/* Dispatch one complete L2CAP frame to the proper handler based on its
 * CID.  Frames that arrive before the HCI link reaches BT_CONNECTED
 * are queued on conn->pending_rx and replayed by process_pending_rx().
 * The skb is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh is captured before skb_pull(); the header bytes remain
	 * valid after the pull since only the data pointer moves.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Drop frames whose header length disagrees with the payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM right after the
		 * basic header.
		 */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6812
process_pending_rx(struct work_struct * work)6813 static void process_pending_rx(struct work_struct *work)
6814 {
6815 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6816 pending_rx_work);
6817 struct sk_buff *skb;
6818
6819 BT_DBG("");
6820
6821 while ((skb = skb_dequeue(&conn->pending_rx)))
6822 l2cap_recv_frame(conn, skb);
6823 }
6824
/* Look up or create the l2cap_conn attached to @hcon.
 *
 * Returns the existing conn when one is already attached, otherwise
 * allocates a new one together with its HCI channel and initializes
 * locks, lists and work items.  Returns NULL on allocation failure.
 * The new conn takes a reference on @hcon via hci_conn_get().
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the link MTU: LE links use le_mtu when the controller
	 * reports one, everything else falls back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the SMP fixed channel over BR/EDR only when secure
	 * connections (or the debug force flag) is available.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6887
is_valid_psm(u16 psm,u8 dst_type)6888 static bool is_valid_psm(u16 psm, u8 dst_type)
6889 {
6890 if (!psm)
6891 return false;
6892
6893 if (bdaddr_type_is_le(dst_type))
6894 return (psm <= 0x00ff);
6895
6896 /* PSM must be odd and lsb of upper byte must be 0 */
6897 return ((psm & 0x0101) == 0x0001);
6898 }
6899
/* Context passed to l2cap_chan_by_pid() when counting deferred
 * channels belonging to the same owner as the channel being connected.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected; excluded from count */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* number of matching deferred channels */
};
6905
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)6906 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6907 {
6908 struct l2cap_chan_data *d = data;
6909 struct pid *pid;
6910
6911 if (chan == d->chan)
6912 return;
6913
6914 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6915 return;
6916
6917 pid = chan->ops->get_peer_pid(chan);
6918
6919 /* Only count deferred channels with the same PID/PSM */
6920 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6921 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6922 return;
6923
6924 d->count++;
6925 }
6926
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates PSM/CID against the channel type and mode, creates or
 * reuses the underlying HCI link, attaches the channel to the
 * l2cap_conn and starts the L2CAP connect procedure.  Returns 0 on
 * success (or when a connect is already in progress) and a negative
 * errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Non-raw channels need either a valid PSM or an explicit CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels require a CID. */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled or unknown */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly as slave
		 * (HCI_ROLE_SLAVE); otherwise go through the
		 * connect-by-scan helper.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Reject a second channel on the same destination CID */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link already came up, proceed immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7113
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7114 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7115 {
7116 struct l2cap_conn *conn = chan->conn;
7117 struct {
7118 struct l2cap_ecred_reconf_req req;
7119 __le16 scid;
7120 } pdu;
7121
7122 pdu.req.mtu = cpu_to_le16(chan->imtu);
7123 pdu.req.mps = cpu_to_le16(chan->mps);
7124 pdu.scid = cpu_to_le16(chan->scid);
7125
7126 chan->ident = l2cap_get_ident(conn);
7127
7128 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7129 sizeof(pdu), &pdu);
7130 }
7131
/* Raise the channel's incoming MTU to @mtu and notify the peer with an
 * ECRED reconfigure request.  Shrinking the MTU is rejected with
 * -EINVAL.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7145
7146 /* ---- L2CAP interface with lower layer (HCI) ---- */
7147
/* Incoming-connection indication from the HCI layer: compute the link
 * mode from listening channels.  Channels bound exactly to this
 * adapter's address take precedence over wildcard (BDADDR_ANY) binds.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm_exact = 0, lm_any = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int lm;

		if (c->state != BT_LISTEN)
			continue;

		lm = HCI_LM_ACCEPT;
		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			lm |= HCI_LM_MASTER;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm_exact |= lm;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm_any |= lm;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm_exact : lm_any;
}
7176
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A match is returned with a reference taken via
 * l2cap_chan_hold_unless_zero() (which may yield NULL if the channel
 * is going away); the caller drops it with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match the link's source address or a wildcard bind */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7212
/* HCI callback: an ACL or LE link finished connecting with @status.
 *
 * On failure the L2CAP conn is torn down.  On success the conn is
 * created if needed, server instances of listening fixed channels are
 * spawned for the new link, and the conn is marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before dropping our reference */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7273
l2cap_disconn_ind(struct hci_conn * hcon)7274 int l2cap_disconn_ind(struct hci_conn *hcon)
7275 {
7276 struct l2cap_conn *conn = hcon->l2cap_data;
7277
7278 BT_DBG("hcon %p", hcon);
7279
7280 if (!conn)
7281 return HCI_ERROR_REMOTE_USER_TERM;
7282 return conn->disc_reason;
7283 }
7284
/* HCI disconnect confirmation: tear down the L2CAP conn for ACL and
 * LE links; other link types are not handled by L2CAP.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);
		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7294
/* React to an encryption change on a connection-oriented channel:
 * MEDIUM security arms/disarms the encryption timer, while HIGH/FIPS
 * channels are closed outright when encryption drops.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt)
			__clear_chan_timer(chan);
		else
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (!encrypt)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7311
/* HCI callback: authentication/encryption state changed for @hcon.
 *
 * Walks every channel on the conn and advances or aborts each
 * channel's connect/config state machine according to @status and the
 * new @encrypt state.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a connect request still pending are
		 * left for the response handler to progress.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed or the encryption key
				 * is too short: block the connection.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7398
7399 /* Append fragment into frame respecting the maximum len of rx_skb */
l2cap_recv_frag(struct l2cap_conn * conn,struct sk_buff * skb,u16 len)7400 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7401 u16 len)
7402 {
7403 if (!conn->rx_skb) {
7404 /* Allocate skb for the complete frame (with header) */
7405 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7406 if (!conn->rx_skb)
7407 return -ENOMEM;
7408 /* Init rx_len */
7409 conn->rx_len = len;
7410 }
7411
7412 /* Copy as much as the rx_skb can hold */
7413 len = min_t(u16, len, skb->len);
7414 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7415 skb_pull(skb, len);
7416 conn->rx_len -= len;
7417
7418 return len;
7419 }
7420
/* Complete the 2-byte L2CAP length field of a partially received frame
 * and, once known, reallocate conn->rx_skb if it cannot hold the full
 * frame.  Returns a negative errno on allocation failure, otherwise
 * the number of bytes consumed from @skb.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to receive all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the old
	 * buffer's contents (the length bytes) are copied over.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7455
/* Discard any partially reassembled frame and reset the rx state. */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7462
/* HCI callback: one ACL packet (possibly a fragment of an L2CAP frame)
 * arrived on @hcon.  Fragments are accumulated into conn->rx_skb; a
 * complete frame is handed to l2cap_recv_frame(), which consumes it.
 * On every other path the incoming skb's payload has been copied, so
 * falling through to "drop" and freeing it is correct even on success.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while still reassembling means the
		 * remaining fragments of the previous frame were lost.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7569
/* Callbacks registered with the HCI core in l2cap_init() and removed
 * again in l2cap_exit().
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7576
l2cap_debugfs_show(struct seq_file * f,void * p)7577 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7578 {
7579 struct l2cap_chan *c;
7580
7581 read_lock(&chan_list_lock);
7582
7583 list_for_each_entry(c, &chan_list, global_l) {
7584 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7585 &c->src, c->src_type, &c->dst, c->dst_type,
7586 c->state, __le16_to_cpu(c->psm),
7587 c->scid, c->dcid, c->imtu, c->omtu,
7588 c->sec_level, c->mode);
7589 }
7590
7591 read_unlock(&chan_list_lock);
7592
7593 return 0;
7594 }
7595
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show(). */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the "l2cap" debugfs file, removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
7599
l2cap_init(void)7600 int __init l2cap_init(void)
7601 {
7602 int err;
7603
7604 err = l2cap_init_sockets();
7605 if (err < 0)
7606 return err;
7607
7608 hci_register_cb(&l2cap_cb);
7609
7610 if (IS_ERR_OR_NULL(bt_debugfs))
7611 return 0;
7612
7613 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7614 NULL, &l2cap_debugfs_fops);
7615
7616 return 0;
7617 }
7618
/* Module teardown: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7625
/* Runtime-tunable module parameters (mode 0644: also writable via
 * /sys/module/.../parameters).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");