1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 /* Get HCI device by index.
66 * Device is held on return. */
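/* A minimal usage sketch (illustrative only): the reference taken here must
 * be balanced with hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (hdev) {
 *		...
 *		hci_dev_put(hdev);
 *	}
 */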
67 struct hci_dev *hci_dev_get(int index)
68 {
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 break;
81 }
82 }
83 read_unlock(&hci_dev_list_lock);
84 return hdev;
85 }
86
87 /* ---- Inquiry support ---- */
88
89 bool hci_discovery_active(struct hci_dev *hdev)
90 {
91 struct discovery_state *discov = &hdev->discovery;
92
93 switch (discov->state) {
94 case DISCOVERY_FINDING:
95 case DISCOVERY_RESOLVING:
96 return true;
97
98 default:
99 return false;
100 }
101 }
102
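/* Update the discovery state machine. Transitions that are visible to
 * userspace are reported through mgmt_discovering(), and moving back to
 * DISCOVERY_STOPPED also triggers a passive scan update.
 */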
103 void hci_discovery_set_state(struct hci_dev *hdev, int state)
104 {
105 int old_state = hdev->discovery.state;
106
107 if (old_state == state)
108 return;
109
110 hdev->discovery.state = state;
111
112 switch (state) {
113 case DISCOVERY_STOPPED:
114 hci_update_passive_scan(hdev);
115
116 if (old_state != DISCOVERY_STARTING)
117 mgmt_discovering(hdev, 0);
118 break;
119 case DISCOVERY_STARTING:
120 break;
121 case DISCOVERY_FINDING:
122 mgmt_discovering(hdev, 1);
123 break;
124 case DISCOVERY_RESOLVING:
125 break;
126 case DISCOVERY_STOPPING:
127 break;
128 }
129
130 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
131 }
132
133 void hci_inquiry_cache_flush(struct hci_dev *hdev)
134 {
135 struct discovery_state *cache = &hdev->discovery;
136 struct inquiry_entry *p, *n;
137
138 list_for_each_entry_safe(p, n, &cache->all, all) {
139 list_del(&p->all);
140 kfree(p);
141 }
142
143 INIT_LIST_HEAD(&cache->unknown);
144 INIT_LIST_HEAD(&cache->resolve);
145 }
146
147 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
148 bdaddr_t *bdaddr)
149 {
150 struct discovery_state *cache = &hdev->discovery;
151 struct inquiry_entry *e;
152
153 BT_DBG("cache %p, %pMR", cache, bdaddr);
154
155 list_for_each_entry(e, &cache->all, all) {
156 if (!bacmp(&e->data.bdaddr, bdaddr))
157 return e;
158 }
159
160 return NULL;
161 }
162
163 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
164 bdaddr_t *bdaddr)
165 {
166 struct discovery_state *cache = &hdev->discovery;
167 struct inquiry_entry *e;
168
169 BT_DBG("cache %p, %pMR", cache, bdaddr);
170
171 list_for_each_entry(e, &cache->unknown, list) {
172 if (!bacmp(&e->data.bdaddr, bdaddr))
173 return e;
174 }
175
176 return NULL;
177 }
178
179 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
180 bdaddr_t *bdaddr,
181 int state)
182 {
183 struct discovery_state *cache = &hdev->discovery;
184 struct inquiry_entry *e;
185
186 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
187
188 list_for_each_entry(e, &cache->resolve, list) {
189 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
190 return e;
191 if (!bacmp(&e->data.bdaddr, bdaddr))
192 return e;
193 }
194
195 return NULL;
196 }
197
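/* Re-insert an entry into the resolve list so that entries with a name
 * request already pending stay in front and the remaining entries are kept
 * ordered by signal strength (strongest RSSI first).
 */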
198 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
199 struct inquiry_entry *ie)
200 {
201 struct discovery_state *cache = &hdev->discovery;
202 struct list_head *pos = &cache->resolve;
203 struct inquiry_entry *p;
204
205 list_del(&ie->list);
206
207 list_for_each_entry(p, &cache->resolve, list) {
208 if (p->name_state != NAME_PENDING &&
209 abs(p->data.rssi) >= abs(ie->data.rssi))
210 break;
211 pos = &p->list;
212 }
213
214 list_add(&ie->list, pos);
215 }
216
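/* Add a new entry to the inquiry cache, or refresh an existing one, and
 * return the MGMT_DEV_FOUND_* flags to be used for the Device Found event.
 */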
217 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
218 bool name_known)
219 {
220 struct discovery_state *cache = &hdev->discovery;
221 struct inquiry_entry *ie;
222 u32 flags = 0;
223
224 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
225
226 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
227
228 if (!data->ssp_mode)
229 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
230
231 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
232 if (ie) {
233 if (!ie->data.ssp_mode)
234 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
235
236 if (ie->name_state == NAME_NEEDED &&
237 data->rssi != ie->data.rssi) {
238 ie->data.rssi = data->rssi;
239 hci_inquiry_cache_update_resolve(hdev, ie);
240 }
241
242 goto update;
243 }
244
245 /* Entry not in the cache. Add new one. */
246 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
247 if (!ie) {
248 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
249 goto done;
250 }
251
252 list_add(&ie->all, &cache->all);
253
254 if (name_known) {
255 ie->name_state = NAME_KNOWN;
256 } else {
257 ie->name_state = NAME_NOT_KNOWN;
258 list_add(&ie->list, &cache->unknown);
259 }
260
261 update:
262 if (name_known && ie->name_state != NAME_KNOWN &&
263 ie->name_state != NAME_PENDING) {
264 ie->name_state = NAME_KNOWN;
265 list_del(&ie->list);
266 }
267
268 memcpy(&ie->data, data, sizeof(*data));
269 ie->timestamp = jiffies;
270 cache->timestamp = jiffies;
271
272 if (ie->name_state == NAME_NOT_KNOWN)
273 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
274
275 done:
276 return flags;
277 }
278
279 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
280 {
281 struct discovery_state *cache = &hdev->discovery;
282 struct inquiry_info *info = (struct inquiry_info *) buf;
283 struct inquiry_entry *e;
284 int copied = 0;
285
286 list_for_each_entry(e, &cache->all, all) {
287 struct inquiry_data *data = &e->data;
288
289 if (copied >= num)
290 break;
291
292 bacpy(&info->bdaddr, &data->bdaddr);
293 info->pscan_rep_mode = data->pscan_rep_mode;
294 info->pscan_period_mode = data->pscan_period_mode;
295 info->pscan_mode = data->pscan_mode;
296 memcpy(info->dev_class, data->dev_class, 3);
297 info->clock_offset = data->clock_offset;
298
299 info++;
300 copied++;
301 }
302
303 BT_DBG("cache %p, copied %d", cache, copied);
304 return copied;
305 }
306
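/* Handler for the HCIINQUIRY ioctl: flush the inquiry cache when requested
 * or stale, run a new inquiry if needed and copy the cached results back to
 * userspace.
 */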
307 int hci_inquiry(void __user *arg)
308 {
309 __u8 __user *ptr = arg;
310 struct hci_inquiry_req ir;
311 struct hci_dev *hdev;
312 int err = 0, do_inquiry = 0, max_rsp;
313 __u8 *buf;
314
315 if (copy_from_user(&ir, ptr, sizeof(ir)))
316 return -EFAULT;
317
318 hdev = hci_dev_get(ir.dev_id);
319 if (!hdev)
320 return -ENODEV;
321
322 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
323 err = -EBUSY;
324 goto done;
325 }
326
327 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
328 err = -EOPNOTSUPP;
329 goto done;
330 }
331
332 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
333 err = -EOPNOTSUPP;
334 goto done;
335 }
336
337 /* Restrict maximum inquiry length to 60 seconds */
338 if (ir.length > 60) {
339 err = -EINVAL;
340 goto done;
341 }
342
343 hci_dev_lock(hdev);
344 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
345 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
346 hci_inquiry_cache_flush(hdev);
347 do_inquiry = 1;
348 }
349 hci_dev_unlock(hdev);
350
351 if (do_inquiry) {
352 hci_req_sync_lock(hdev);
353 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
354 hci_req_sync_unlock(hdev);
355
356 if (err < 0)
357 goto done;
358
359 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
360 * cleared). If it is interrupted by a signal, return -EINTR.
361 */
362 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
363 TASK_INTERRUPTIBLE)) {
364 err = -EINTR;
365 goto done;
366 }
367 }
368
369 /* For an unlimited number of responses we will use a buffer with
370 * 255 entries
371 */
372 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
373
374 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
375 * copy it to user space.
376 */
377 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
378 if (!buf) {
379 err = -ENOMEM;
380 goto done;
381 }
382
383 hci_dev_lock(hdev);
384 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
385 hci_dev_unlock(hdev);
386
387 BT_DBG("num_rsp %d", ir.num_rsp);
388
389 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
390 ptr += sizeof(ir);
391 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
392 ir.num_rsp))
393 err = -EFAULT;
394 } else
395 err = -EFAULT;
396
397 kfree(buf);
398
399 done:
400 hci_dev_put(hdev);
401 return err;
402 }
403
404 static int hci_dev_do_open(struct hci_dev *hdev)
405 {
406 int ret = 0;
407
408 BT_DBG("%s %p", hdev->name, hdev);
409
410 hci_req_sync_lock(hdev);
411
412 ret = hci_dev_open_sync(hdev);
413
414 hci_req_sync_unlock(hdev);
415 return ret;
416 }
417
418 /* ---- HCI ioctl helpers ---- */
419
420 int hci_dev_open(__u16 dev)
421 {
422 struct hci_dev *hdev;
423 int err;
424
425 hdev = hci_dev_get(dev);
426 if (!hdev)
427 return -ENODEV;
428
429 /* Devices that are marked as unconfigured can only be powered
430 * up as user channel. Trying to bring them up as normal devices
431 * will result in a failure. Only user channel operation is
432 * possible.
433 *
434 * When this function is called for a user channel, the flag
435 * HCI_USER_CHANNEL will be set first before attempting to
436 * open the device.
437 */
438 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
439 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
440 err = -EOPNOTSUPP;
441 goto done;
442 }
443
444 /* We need to ensure that no other power on/off work is pending
445 * before proceeding to call hci_dev_do_open. This is
446 * particularly important if the setup procedure has not yet
447 * completed.
448 */
449 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
450 cancel_delayed_work(&hdev->power_off);
451
452 /* After this call it is guaranteed that the setup procedure
453 * has finished. This means that error conditions like RFKILL
454 * or no valid public or static random address apply.
455 */
456 flush_workqueue(hdev->req_workqueue);
457
458 /* For controllers not using the management interface and that
459 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
460 * so that pairing works for them. Once the management interface
461 * is in use this bit will be cleared again and userspace has
462 * to explicitly enable it.
463 */
464 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
465 !hci_dev_test_flag(hdev, HCI_MGMT))
466 hci_dev_set_flag(hdev, HCI_BONDABLE);
467
468 err = hci_dev_do_open(hdev);
469
470 done:
471 hci_dev_put(hdev);
472 return err;
473 }
474
475 int hci_dev_do_close(struct hci_dev *hdev)
476 {
477 int err;
478
479 BT_DBG("%s %p", hdev->name, hdev);
480
481 hci_req_sync_lock(hdev);
482
483 err = hci_dev_close_sync(hdev);
484
485 hci_req_sync_unlock(hdev);
486
487 return err;
488 }
489
490 int hci_dev_close(__u16 dev)
491 {
492 struct hci_dev *hdev;
493 int err;
494
495 hdev = hci_dev_get(dev);
496 if (!hdev)
497 return -ENODEV;
498
499 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
500 err = -EBUSY;
501 goto done;
502 }
503
504 cancel_work_sync(&hdev->power_on);
505 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
506 cancel_delayed_work(&hdev->power_off);
507
508 err = hci_dev_do_close(hdev);
509
510 done:
511 hci_dev_put(hdev);
512 return err;
513 }
514
515 static int hci_dev_do_reset(struct hci_dev *hdev)
516 {
517 int ret;
518
519 BT_DBG("%s %p", hdev->name, hdev);
520
521 hci_req_sync_lock(hdev);
522
523 /* Drop queues */
524 skb_queue_purge(&hdev->rx_q);
525 skb_queue_purge(&hdev->cmd_q);
526
527 /* Cancel these to avoid queueing non-chained pending work */
528 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
529 /* Wait for
530 *
531 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
532 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
533 *
534 * inside RCU section to see the flag or complete scheduling.
535 */
536 synchronize_rcu();
537 /* Explicitly cancel works in case scheduled after setting the flag. */
538 cancel_delayed_work(&hdev->cmd_timer);
539 cancel_delayed_work(&hdev->ncmd_timer);
540
541 /* Avoid potential lockdep warnings from the *_flush() calls by
542 * ensuring the workqueue is empty up front.
543 */
544 drain_workqueue(hdev->workqueue);
545
546 hci_dev_lock(hdev);
547 hci_inquiry_cache_flush(hdev);
548 hci_conn_hash_flush(hdev);
549 hci_dev_unlock(hdev);
550
551 if (hdev->flush)
552 hdev->flush(hdev);
553
554 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
555
556 atomic_set(&hdev->cmd_cnt, 1);
557 hdev->acl_cnt = 0;
558 hdev->sco_cnt = 0;
559 hdev->le_cnt = 0;
560 hdev->iso_cnt = 0;
561
562 ret = hci_reset_sync(hdev);
563
564 hci_req_sync_unlock(hdev);
565 return ret;
566 }
567
568 int hci_dev_reset(__u16 dev)
569 {
570 struct hci_dev *hdev;
571 int err;
572
573 hdev = hci_dev_get(dev);
574 if (!hdev)
575 return -ENODEV;
576
577 if (!test_bit(HCI_UP, &hdev->flags)) {
578 err = -ENETDOWN;
579 goto done;
580 }
581
582 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
583 err = -EBUSY;
584 goto done;
585 }
586
587 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
588 err = -EOPNOTSUPP;
589 goto done;
590 }
591
592 err = hci_dev_do_reset(hdev);
593
594 done:
595 hci_dev_put(hdev);
596 return err;
597 }
598
599 int hci_dev_reset_stat(__u16 dev)
600 {
601 struct hci_dev *hdev;
602 int ret = 0;
603
604 hdev = hci_dev_get(dev);
605 if (!hdev)
606 return -ENODEV;
607
608 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
609 ret = -EBUSY;
610 goto done;
611 }
612
613 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
614 ret = -EOPNOTSUPP;
615 goto done;
616 }
617
618 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
619
620 done:
621 hci_dev_put(hdev);
622 return ret;
623 }
624
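/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a scan
 * enable value written through the legacy HCISETSCAN ioctl and notify mgmt
 * about the resulting settings change.
 */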
625 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
626 {
627 bool conn_changed, discov_changed;
628
629 BT_DBG("%s scan 0x%02x", hdev->name, scan);
630
631 if ((scan & SCAN_PAGE))
632 conn_changed = !hci_dev_test_and_set_flag(hdev,
633 HCI_CONNECTABLE);
634 else
635 conn_changed = hci_dev_test_and_clear_flag(hdev,
636 HCI_CONNECTABLE);
637
638 if ((scan & SCAN_INQUIRY)) {
639 discov_changed = !hci_dev_test_and_set_flag(hdev,
640 HCI_DISCOVERABLE);
641 } else {
642 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
643 discov_changed = hci_dev_test_and_clear_flag(hdev,
644 HCI_DISCOVERABLE);
645 }
646
647 if (!hci_dev_test_flag(hdev, HCI_MGMT))
648 return;
649
650 if (conn_changed || discov_changed) {
651 /* In case this was disabled through mgmt */
652 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
653
654 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
655 hci_update_adv_data(hdev, hdev->cur_adv_instance);
656
657 mgmt_new_settings(hdev);
658 }
659 }
660
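/* Handler for the legacy HCISET* ioctls: each request is translated into the
 * matching HCI command and issued synchronously against the controller.
 */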
661 int hci_dev_cmd(unsigned int cmd, void __user *arg)
662 {
663 struct hci_dev *hdev;
664 struct hci_dev_req dr;
665 __le16 policy;
666 int err = 0;
667
668 if (copy_from_user(&dr, arg, sizeof(dr)))
669 return -EFAULT;
670
671 hdev = hci_dev_get(dr.dev_id);
672 if (!hdev)
673 return -ENODEV;
674
675 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
676 err = -EBUSY;
677 goto done;
678 }
679
680 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
681 err = -EOPNOTSUPP;
682 goto done;
683 }
684
685 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
686 err = -EOPNOTSUPP;
687 goto done;
688 }
689
690 switch (cmd) {
691 case HCISETAUTH:
692 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
693 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
694 break;
695
696 case HCISETENCRYPT:
697 if (!lmp_encrypt_capable(hdev)) {
698 err = -EOPNOTSUPP;
699 break;
700 }
701
702 if (!test_bit(HCI_AUTH, &hdev->flags)) {
703 /* Auth must be enabled first */
704 err = hci_cmd_sync_status(hdev,
705 HCI_OP_WRITE_AUTH_ENABLE,
706 1, &dr.dev_opt,
707 HCI_CMD_TIMEOUT);
708 if (err)
709 break;
710 }
711
712 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
713 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
714 break;
715
716 case HCISETSCAN:
717 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
718 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
719
720 /* Ensure that the connectable and discoverable states
721 * get correctly modified as this was a non-mgmt change.
722 */
723 if (!err)
724 hci_update_passive_scan_state(hdev, dr.dev_opt);
725 break;
726
727 case HCISETLINKPOL:
728 policy = cpu_to_le16(dr.dev_opt);
729
730 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
731 2, &policy, HCI_CMD_TIMEOUT);
732 break;
733
734 case HCISETLINKMODE:
735 hdev->link_mode = ((__u16) dr.dev_opt) &
736 (HCI_LM_MASTER | HCI_LM_ACCEPT);
737 break;
738
739 case HCISETPTYPE:
740 if (hdev->pkt_type == (__u16) dr.dev_opt)
741 break;
742
743 hdev->pkt_type = (__u16) dr.dev_opt;
744 mgmt_phy_configuration_changed(hdev, NULL);
745 break;
746
747 case HCISETACLMTU:
748 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
749 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
750 break;
751
752 case HCISETSCOMTU:
753 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
754 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
755 break;
756
757 default:
758 err = -EINVAL;
759 break;
760 }
761
762 done:
763 hci_dev_put(hdev);
764 return err;
765 }
766
767 int hci_get_dev_list(void __user *arg)
768 {
769 struct hci_dev *hdev;
770 struct hci_dev_list_req *dl;
771 struct hci_dev_req *dr;
772 int n = 0, err;
773 __u16 dev_num;
774
775 if (get_user(dev_num, (__u16 __user *) arg))
776 return -EFAULT;
777
778 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
779 return -EINVAL;
780
781 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
782 if (!dl)
783 return -ENOMEM;
784
785 dl->dev_num = dev_num;
786 dr = dl->dev_req;
787
788 read_lock(&hci_dev_list_lock);
789 list_for_each_entry(hdev, &hci_dev_list, list) {
790 unsigned long flags = hdev->flags;
791
792 /* When the auto-off is configured it means the transport
793 * is running, but in that case still indicate that the
794 * device is actually down.
795 */
796 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
797 flags &= ~BIT(HCI_UP);
798
799 dr[n].dev_id = hdev->id;
800 dr[n].dev_opt = flags;
801
802 if (++n >= dev_num)
803 break;
804 }
805 read_unlock(&hci_dev_list_lock);
806
807 dl->dev_num = n;
808 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
809 kfree(dl);
810
811 return err ? -EFAULT : 0;
812 }
813
814 int hci_get_dev_info(void __user *arg)
815 {
816 struct hci_dev *hdev;
817 struct hci_dev_info di;
818 unsigned long flags;
819 int err = 0;
820
821 if (copy_from_user(&di, arg, sizeof(di)))
822 return -EFAULT;
823
824 hdev = hci_dev_get(di.dev_id);
825 if (!hdev)
826 return -ENODEV;
827
828 /* When the auto-off is configured it means the transport
829 * is running, but in that case still indicate that the
830 * device is actually down.
831 */
832 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
833 flags = hdev->flags & ~BIT(HCI_UP);
834 else
835 flags = hdev->flags;
836
837 strscpy(di.name, hdev->name, sizeof(di.name));
838 di.bdaddr = hdev->bdaddr;
839 di.type = (hdev->bus & 0x0f);
840 di.flags = flags;
841 di.pkt_type = hdev->pkt_type;
842 if (lmp_bredr_capable(hdev)) {
843 di.acl_mtu = hdev->acl_mtu;
844 di.acl_pkts = hdev->acl_pkts;
845 di.sco_mtu = hdev->sco_mtu;
846 di.sco_pkts = hdev->sco_pkts;
847 } else {
848 di.acl_mtu = hdev->le_mtu;
849 di.acl_pkts = hdev->le_pkts;
850 di.sco_mtu = 0;
851 di.sco_pkts = 0;
852 }
853 di.link_policy = hdev->link_policy;
854 di.link_mode = hdev->link_mode;
855
856 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
857 memcpy(&di.features, &hdev->features, sizeof(di.features));
858
859 if (copy_to_user(arg, &di, sizeof(di)))
860 err = -EFAULT;
861
862 hci_dev_put(hdev);
863
864 return err;
865 }
866
867 /* ---- Interface to HCI drivers ---- */
868
869 static int hci_dev_do_poweroff(struct hci_dev *hdev)
870 {
871 int err;
872
873 BT_DBG("%s %p", hdev->name, hdev);
874
875 hci_req_sync_lock(hdev);
876
877 err = hci_set_powered_sync(hdev, false);
878
879 hci_req_sync_unlock(hdev);
880
881 return err;
882 }
883
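/* rfkill callback: blocking the switch powers the controller off, while
 * unblocking only clears the HCI_RFKILLED flag and leaves powering the
 * device back on to userspace.
 */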
884 static int hci_rfkill_set_block(void *data, bool blocked)
885 {
886 struct hci_dev *hdev = data;
887 int err;
888
889 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
890
891 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
892 return -EBUSY;
893
894 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
895 return 0;
896
897 if (blocked) {
898 hci_dev_set_flag(hdev, HCI_RFKILLED);
899
900 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
901 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
902 err = hci_dev_do_poweroff(hdev);
903 if (err) {
904 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
905 err);
906
907 /* Make sure the device is still closed even if
908 * anything during power off sequence (eg.
909 * disconnecting devices) failed.
910 */
911 hci_dev_do_close(hdev);
912 }
913 }
914 } else {
915 hci_dev_clear_flag(hdev, HCI_RFKILLED);
916 }
917
918 return 0;
919 }
920
921 static const struct rfkill_ops hci_rfkill_ops = {
922 .set_block = hci_rfkill_set_block,
923 };
924
925 static void hci_power_on(struct work_struct *work)
926 {
927 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
928 int err;
929
930 BT_DBG("%s", hdev->name);
931
932 if (test_bit(HCI_UP, &hdev->flags) &&
933 hci_dev_test_flag(hdev, HCI_MGMT) &&
934 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
935 cancel_delayed_work(&hdev->power_off);
936 err = hci_powered_update_sync(hdev);
937 mgmt_power_on(hdev, err);
938 return;
939 }
940
941 err = hci_dev_do_open(hdev);
942 if (err < 0) {
943 hci_dev_lock(hdev);
944 mgmt_set_powered_failed(hdev, err);
945 hci_dev_unlock(hdev);
946 return;
947 }
948
949 /* During the HCI setup phase, a few error conditions are
950 * ignored and they need to be checked now. If they are still
951 * valid, it is important to turn the device back off.
952 */
953 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
954 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
955 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
956 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
957 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
958 hci_dev_do_close(hdev);
959 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
960 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
961 HCI_AUTO_OFF_TIMEOUT);
962 }
963
964 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
965 /* For unconfigured devices, set the HCI_RAW flag
966 * so that userspace can easily identify them.
967 */
968 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
969 set_bit(HCI_RAW, &hdev->flags);
970
971 /* For fully configured devices, this will send
972 * the Index Added event. For unconfigured devices,
973 * it will send the Unconfigured Index Added event.
974 *
975 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
976 * and no event will be sent.
977 */
978 mgmt_index_added(hdev);
979 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
980 /* Once the controller is configured, it is
981 * important to clear the HCI_RAW flag.
982 */
983 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
984 clear_bit(HCI_RAW, &hdev->flags);
985
986 /* Powering on the controller with HCI_CONFIG set only
987 * happens with the transition from unconfigured to
988 * configured. This will send the Index Added event.
989 */
990 mgmt_index_added(hdev);
991 }
992 }
993
994 static void hci_power_off(struct work_struct *work)
995 {
996 struct hci_dev *hdev = container_of(work, struct hci_dev,
997 power_off.work);
998
999 BT_DBG("%s", hdev->name);
1000
1001 hci_dev_do_close(hdev);
1002 }
1003
1004 static void hci_error_reset(struct work_struct *work)
1005 {
1006 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1007
1008 hci_dev_hold(hdev);
1009 BT_DBG("%s", hdev->name);
1010
1011 if (hdev->hw_error)
1012 hdev->hw_error(hdev, hdev->hw_error_code);
1013 else
1014 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1015
1016 if (!hci_dev_do_close(hdev))
1017 hci_dev_do_open(hdev);
1018
1019 hci_dev_put(hdev);
1020 }
1021
1022 void hci_uuids_clear(struct hci_dev *hdev)
1023 {
1024 struct bt_uuid *uuid, *tmp;
1025
1026 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1027 list_del(&uuid->list);
1028 kfree(uuid);
1029 }
1030 }
1031
1032 void hci_link_keys_clear(struct hci_dev *hdev)
1033 {
1034 struct link_key *key, *tmp;
1035
1036 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1037 list_del_rcu(&key->list);
1038 kfree_rcu(key, rcu);
1039 }
1040 }
1041
1042 void hci_smp_ltks_clear(struct hci_dev *hdev)
1043 {
1044 struct smp_ltk *k, *tmp;
1045
1046 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1047 list_del_rcu(&k->list);
1048 kfree_rcu(k, rcu);
1049 }
1050 }
1051
1052 void hci_smp_irks_clear(struct hci_dev *hdev)
1053 {
1054 struct smp_irk *k, *tmp;
1055
1056 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1057 list_del_rcu(&k->list);
1058 kfree_rcu(k, rcu);
1059 }
1060 }
1061
1062 void hci_blocked_keys_clear(struct hci_dev *hdev)
1063 {
1064 struct blocked_key *b, *tmp;
1065
1066 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1067 list_del_rcu(&b->list);
1068 kfree_rcu(b, rcu);
1069 }
1070 }
1071
1072 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1073 {
1074 bool blocked = false;
1075 struct blocked_key *b;
1076
1077 rcu_read_lock();
1078 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1079 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1080 blocked = true;
1081 break;
1082 }
1083 }
1084
1085 rcu_read_unlock();
1086 return blocked;
1087 }
1088
1089 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1090 {
1091 struct link_key *k;
1092
1093 rcu_read_lock();
1094 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1095 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1096 rcu_read_unlock();
1097
1098 if (hci_is_blocked_key(hdev,
1099 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1100 k->val)) {
1101 bt_dev_warn_ratelimited(hdev,
1102 "Link key blocked for %pMR",
1103 &k->bdaddr);
1104 return NULL;
1105 }
1106
1107 return k;
1108 }
1109 }
1110 rcu_read_unlock();
1111
1112 return NULL;
1113 }
1114
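/* Decide whether a BR/EDR link key should be stored persistently, based on
 * the key type and on the bonding requirements both sides announced during
 * pairing.
 */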
1115 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1116 u8 key_type, u8 old_key_type)
1117 {
1118 /* Legacy key */
1119 if (key_type < 0x03)
1120 return true;
1121
1122 /* Debug keys are insecure so don't store them persistently */
1123 if (key_type == HCI_LK_DEBUG_COMBINATION)
1124 return false;
1125
1126 /* Changed combination key and there's no previous one */
1127 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1128 return false;
1129
1130 /* Security mode 3 case */
1131 if (!conn)
1132 return true;
1133
1134 /* BR/EDR key derived using SC from an LE link */
1135 if (conn->type == LE_LINK)
1136 return true;
1137
1138 /* Neither local nor remote side had no-bonding as requirement */
1139 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140 return true;
1141
1142 /* Local side had dedicated bonding as requirement */
1143 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144 return true;
1145
1146 /* Remote side had dedicated bonding as requirement */
1147 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148 return true;
1149
1150 /* If none of the above criteria match, then don't store the key
1151 * persistently */
1152 return false;
1153 }
1154
1155 static u8 ltk_role(u8 type)
1156 {
1157 if (type == SMP_LTK)
1158 return HCI_ROLE_MASTER;
1159
1160 return HCI_ROLE_SLAVE;
1161 }
1162
1163 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1164 u8 addr_type, u8 role)
1165 {
1166 struct smp_ltk *k;
1167
1168 rcu_read_lock();
1169 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1170 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1171 continue;
1172
1173 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1174 rcu_read_unlock();
1175
1176 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1177 k->val)) {
1178 bt_dev_warn_ratelimited(hdev,
1179 "LTK blocked for %pMR",
1180 &k->bdaddr);
1181 return NULL;
1182 }
1183
1184 return k;
1185 }
1186 }
1187 rcu_read_unlock();
1188
1189 return NULL;
1190 }
1191
1192 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1193 {
1194 struct smp_irk *irk_to_return = NULL;
1195 struct smp_irk *irk;
1196
1197 rcu_read_lock();
1198 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1199 if (!bacmp(&irk->rpa, rpa)) {
1200 irk_to_return = irk;
1201 goto done;
1202 }
1203 }
1204
1205 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1206 if (smp_irk_matches(hdev, irk->val, rpa)) {
1207 bacpy(&irk->rpa, rpa);
1208 irk_to_return = irk;
1209 goto done;
1210 }
1211 }
1212
1213 done:
1214 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1215 irk_to_return->val)) {
1216 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1217 &irk_to_return->bdaddr);
1218 irk_to_return = NULL;
1219 }
1220
1221 rcu_read_unlock();
1222
1223 return irk_to_return;
1224 }
1225
1226 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 addr_type)
1228 {
1229 struct smp_irk *irk_to_return = NULL;
1230 struct smp_irk *irk;
1231
1232 /* Identity Address must be public or static random */
1233 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1234 return NULL;
1235
1236 rcu_read_lock();
1237 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1238 if (addr_type == irk->addr_type &&
1239 bacmp(bdaddr, &irk->bdaddr) == 0) {
1240 irk_to_return = irk;
1241 goto done;
1242 }
1243 }
1244
1245 done:
1246
1247 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1248 irk_to_return->val)) {
1249 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1250 &irk_to_return->bdaddr);
1251 irk_to_return = NULL;
1252 }
1253
1254 rcu_read_unlock();
1255
1256 return irk_to_return;
1257 }
1258
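/* Store a new BR/EDR link key or update an existing entry. When persistent
 * is supplied, it is set to whether the key should also be written to
 * permanent storage (see hci_persistent_key()).
 */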
1259 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1260 bdaddr_t *bdaddr, u8 *val, u8 type,
1261 u8 pin_len, bool *persistent)
1262 {
1263 struct link_key *key, *old_key;
1264 u8 old_key_type;
1265
1266 old_key = hci_find_link_key(hdev, bdaddr);
1267 if (old_key) {
1268 old_key_type = old_key->type;
1269 key = old_key;
1270 } else {
1271 old_key_type = conn ? conn->key_type : 0xff;
1272 key = kzalloc(sizeof(*key), GFP_KERNEL);
1273 if (!key)
1274 return NULL;
1275 list_add_rcu(&key->list, &hdev->link_keys);
1276 }
1277
1278 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1279
1280 /* Some buggy controller combinations generate a changed
1281 * combination key for legacy pairing even when there's no
1282 * previous key */
1283 if (type == HCI_LK_CHANGED_COMBINATION &&
1284 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1285 type = HCI_LK_COMBINATION;
1286 if (conn)
1287 conn->key_type = type;
1288 }
1289
1290 bacpy(&key->bdaddr, bdaddr);
1291 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1292 key->pin_len = pin_len;
1293
1294 if (type == HCI_LK_CHANGED_COMBINATION)
1295 key->type = old_key_type;
1296 else
1297 key->type = type;
1298
1299 if (persistent)
1300 *persistent = hci_persistent_key(hdev, conn, type,
1301 old_key_type);
1302
1303 return key;
1304 }
1305
1306 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1307 u8 addr_type, u8 type, u8 authenticated,
1308 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1309 {
1310 struct smp_ltk *key, *old_key;
1311 u8 role = ltk_role(type);
1312
1313 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1314 if (old_key)
1315 key = old_key;
1316 else {
1317 key = kzalloc(sizeof(*key), GFP_KERNEL);
1318 if (!key)
1319 return NULL;
1320 list_add_rcu(&key->list, &hdev->long_term_keys);
1321 }
1322
1323 bacpy(&key->bdaddr, bdaddr);
1324 key->bdaddr_type = addr_type;
1325 memcpy(key->val, tk, sizeof(key->val));
1326 key->authenticated = authenticated;
1327 key->ediv = ediv;
1328 key->rand = rand;
1329 key->enc_size = enc_size;
1330 key->type = type;
1331
1332 return key;
1333 }
1334
1335 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1336 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1337 {
1338 struct smp_irk *irk;
1339
1340 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1341 if (!irk) {
1342 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1343 if (!irk)
1344 return NULL;
1345
1346 bacpy(&irk->bdaddr, bdaddr);
1347 irk->addr_type = addr_type;
1348
1349 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1350 }
1351
1352 memcpy(irk->val, val, 16);
1353 bacpy(&irk->rpa, rpa);
1354
1355 return irk;
1356 }
1357
1358 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359 {
1360 struct link_key *key;
1361
1362 key = hci_find_link_key(hdev, bdaddr);
1363 if (!key)
1364 return -ENOENT;
1365
1366 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1367
1368 list_del_rcu(&key->list);
1369 kfree_rcu(key, rcu);
1370
1371 return 0;
1372 }
1373
1374 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1375 {
1376 struct smp_ltk *k, *tmp;
1377 int removed = 0;
1378
1379 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1380 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1381 continue;
1382
1383 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1384
1385 list_del_rcu(&k->list);
1386 kfree_rcu(k, rcu);
1387 removed++;
1388 }
1389
1390 return removed ? 0 : -ENOENT;
1391 }
1392
1393 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1394 {
1395 struct smp_irk *k, *tmp;
1396
1397 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1398 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1399 continue;
1400
1401 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402
1403 list_del_rcu(&k->list);
1404 kfree_rcu(k, rcu);
1405 }
1406 }
1407
1408 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1409 {
1410 struct smp_ltk *k;
1411 struct smp_irk *irk;
1412 u8 addr_type;
1413
1414 if (type == BDADDR_BREDR) {
1415 if (hci_find_link_key(hdev, bdaddr))
1416 return true;
1417 return false;
1418 }
1419
1420 /* Convert to HCI addr type which struct smp_ltk uses */
1421 if (type == BDADDR_LE_PUBLIC)
1422 addr_type = ADDR_LE_DEV_PUBLIC;
1423 else
1424 addr_type = ADDR_LE_DEV_RANDOM;
1425
1426 irk = hci_get_irk(hdev, bdaddr, addr_type);
1427 if (irk) {
1428 bdaddr = &irk->bdaddr;
1429 addr_type = irk->addr_type;
1430 }
1431
1432 rcu_read_lock();
1433 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1434 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1435 rcu_read_unlock();
1436 return true;
1437 }
1438 }
1439 rcu_read_unlock();
1440
1441 return false;
1442 }
1443
1444 /* HCI command timer function */
1445 static void hci_cmd_timeout(struct work_struct *work)
1446 {
1447 struct hci_dev *hdev = container_of(work, struct hci_dev,
1448 cmd_timer.work);
1449
1450 if (hdev->req_skb) {
1451 u16 opcode = hci_skb_opcode(hdev->req_skb);
1452
1453 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1454
1455 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1456 } else {
1457 bt_dev_err(hdev, "command tx timeout");
1458 }
1459
1460 if (hdev->reset)
1461 hdev->reset(hdev);
1462
1463 atomic_set(&hdev->cmd_cnt, 1);
1464 queue_work(hdev->workqueue, &hdev->cmd_work);
1465 }
1466
1467 /* HCI ncmd timer function */
1468 static void hci_ncmd_timeout(struct work_struct *work)
1469 {
1470 struct hci_dev *hdev = container_of(work, struct hci_dev,
1471 ncmd_timer.work);
1472
1473 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1474
1475 /* During HCI_INIT phase no events can be injected if the ncmd timer
1476 * triggers since the procedure has its own timeout handling.
1477 */
1478 if (test_bit(HCI_INIT, &hdev->flags))
1479 return;
1480
1481 /* This is an irrecoverable state, inject hardware error event */
1482 hci_reset_dev(hdev);
1483 }
1484
1485 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1486 bdaddr_t *bdaddr, u8 bdaddr_type)
1487 {
1488 struct oob_data *data;
1489
1490 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1491 if (bacmp(bdaddr, &data->bdaddr) != 0)
1492 continue;
1493 if (data->bdaddr_type != bdaddr_type)
1494 continue;
1495 return data;
1496 }
1497
1498 return NULL;
1499 }
1500
1501 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1502 u8 bdaddr_type)
1503 {
1504 struct oob_data *data;
1505
1506 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1507 if (!data)
1508 return -ENOENT;
1509
1510 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1511
1512 list_del(&data->list);
1513 kfree(data);
1514
1515 return 0;
1516 }
1517
1518 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1519 {
1520 struct oob_data *data, *n;
1521
1522 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1523 list_del(&data->list);
1524 kfree(data);
1525 }
1526 }
1527
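/* Store remote OOB pairing data. The present field reflects which values
 * were provided: 0x01 for P-192 only, 0x02 for P-256 only and 0x03 for both.
 */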
1528 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1529 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1530 u8 *hash256, u8 *rand256)
1531 {
1532 struct oob_data *data;
1533
1534 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1535 if (!data) {
1536 data = kmalloc(sizeof(*data), GFP_KERNEL);
1537 if (!data)
1538 return -ENOMEM;
1539
1540 bacpy(&data->bdaddr, bdaddr);
1541 data->bdaddr_type = bdaddr_type;
1542 list_add(&data->list, &hdev->remote_oob_data);
1543 }
1544
1545 if (hash192 && rand192) {
1546 memcpy(data->hash192, hash192, sizeof(data->hash192));
1547 memcpy(data->rand192, rand192, sizeof(data->rand192));
1548 if (hash256 && rand256)
1549 data->present = 0x03;
1550 } else {
1551 memset(data->hash192, 0, sizeof(data->hash192));
1552 memset(data->rand192, 0, sizeof(data->rand192));
1553 if (hash256 && rand256)
1554 data->present = 0x02;
1555 else
1556 data->present = 0x00;
1557 }
1558
1559 if (hash256 && rand256) {
1560 memcpy(data->hash256, hash256, sizeof(data->hash256));
1561 memcpy(data->rand256, rand256, sizeof(data->rand256));
1562 } else {
1563 memset(data->hash256, 0, sizeof(data->hash256));
1564 memset(data->rand256, 0, sizeof(data->rand256));
1565 if (hash192 && rand192)
1566 data->present = 0x01;
1567 }
1568
1569 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1570
1571 return 0;
1572 }
1573
1574 /* This function requires the caller holds hdev->lock */
1575 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1576 {
1577 struct adv_info *adv_instance;
1578
1579 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1580 if (adv_instance->instance == instance)
1581 return adv_instance;
1582 }
1583
1584 return NULL;
1585 }
1586
1587 /* This function requires the caller holds hdev->lock */
1588 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1589 {
1590 struct adv_info *cur_instance;
1591
1592 cur_instance = hci_find_adv_instance(hdev, instance);
1593 if (!cur_instance)
1594 return NULL;
1595
1596 if (cur_instance == list_last_entry(&hdev->adv_instances,
1597 struct adv_info, list))
1598 return list_first_entry(&hdev->adv_instances,
1599 struct adv_info, list);
1600 else
1601 return list_next_entry(cur_instance, list);
1602 }
1603
1604 /* This function requires the caller holds hdev->lock */
1605 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1606 {
1607 struct adv_info *adv_instance;
1608
1609 adv_instance = hci_find_adv_instance(hdev, instance);
1610 if (!adv_instance)
1611 return -ENOENT;
1612
1613 BT_DBG("%s removing %dMR", hdev->name, instance);
1614
1615 if (hdev->cur_adv_instance == instance) {
1616 if (hdev->adv_instance_timeout) {
1617 cancel_delayed_work(&hdev->adv_instance_expire);
1618 hdev->adv_instance_timeout = 0;
1619 }
1620 hdev->cur_adv_instance = 0x00;
1621 }
1622
1623 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1624
1625 list_del(&adv_instance->list);
1626 kfree(adv_instance);
1627
1628 hdev->adv_instance_cnt--;
1629
1630 return 0;
1631 }
1632
1633 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1634 {
1635 struct adv_info *adv_instance, *n;
1636
1637 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1638 adv_instance->rpa_expired = rpa_expired;
1639 }
1640
1641 /* This function requires the caller holds hdev->lock */
1642 void hci_adv_instances_clear(struct hci_dev *hdev)
1643 {
1644 struct adv_info *adv_instance, *n;
1645
1646 if (hdev->adv_instance_timeout) {
1647 disable_delayed_work(&hdev->adv_instance_expire);
1648 hdev->adv_instance_timeout = 0;
1649 }
1650
1651 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1652 disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1653 list_del(&adv_instance->list);
1654 kfree(adv_instance);
1655 }
1656
1657 hdev->adv_instance_cnt = 0;
1658 hdev->cur_adv_instance = 0x00;
1659 }
1660
1661 static void adv_instance_rpa_expired(struct work_struct *work)
1662 {
1663 struct adv_info *adv_instance = container_of(work, struct adv_info,
1664 rpa_expired_cb.work);
1665
1666 BT_DBG("");
1667
1668 adv_instance->rpa_expired = true;
1669 }
1670
1671 /* This function requires the caller holds hdev->lock */
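/* Create a new advertising instance or update the parameters of an existing
 * one; new instances also get their advertising set handle assigned here.
 */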
1672 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1673 u32 flags, u16 adv_data_len, u8 *adv_data,
1674 u16 scan_rsp_len, u8 *scan_rsp_data,
1675 u16 timeout, u16 duration, s8 tx_power,
1676 u32 min_interval, u32 max_interval,
1677 u8 mesh_handle)
1678 {
1679 struct adv_info *adv;
1680
1681 adv = hci_find_adv_instance(hdev, instance);
1682 if (adv) {
1683 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1684 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1685 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1686 } else {
1687 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1688 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1689 return ERR_PTR(-EOVERFLOW);
1690
1691 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1692 if (!adv)
1693 return ERR_PTR(-ENOMEM);
1694
1695 adv->pending = true;
1696 adv->instance = instance;
1697
1698 /* If the controller supports only one set and the instance is set to
1699 * 1 then there is no option other than using handle 0x00.
1700 */
1701 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1702 adv->handle = 0x00;
1703 else
1704 adv->handle = instance;
1705
1706 list_add(&adv->list, &hdev->adv_instances);
1707 hdev->adv_instance_cnt++;
1708 }
1709
1710 adv->flags = flags;
1711 adv->min_interval = min_interval;
1712 adv->max_interval = max_interval;
1713 adv->tx_power = tx_power;
1714 /* Defining a mesh_handle changes the timing units to ms,
1715 * rather than seconds, and ties the instance to the requested
1716 * mesh_tx queue.
1717 */
1718 adv->mesh = mesh_handle;
1719
1720 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1721 scan_rsp_len, scan_rsp_data);
1722
1723 adv->timeout = timeout;
1724 adv->remaining_time = timeout;
1725
1726 if (duration == 0)
1727 adv->duration = hdev->def_multi_adv_rotation_duration;
1728 else
1729 adv->duration = duration;
1730
1731 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1732
1733 BT_DBG("%s for %dMR", hdev->name, instance);
1734
1735 return adv;
1736 }
1737
1738 /* This function requires the caller holds hdev->lock */
1739 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1740 u32 flags, u8 data_len, u8 *data,
1741 u32 min_interval, u32 max_interval)
1742 {
1743 struct adv_info *adv;
1744
1745 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1746 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1747 min_interval, max_interval, 0);
1748 if (IS_ERR(adv))
1749 return adv;
1750
1751 adv->periodic = true;
1752 adv->per_adv_data_len = data_len;
1753
1754 if (data)
1755 memcpy(adv->per_adv_data, data, data_len);
1756
1757 return adv;
1758 }
1759
1760 /* This function requires the caller holds hdev->lock */
1761 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1762 u16 adv_data_len, u8 *adv_data,
1763 u16 scan_rsp_len, u8 *scan_rsp_data)
1764 {
1765 struct adv_info *adv;
1766
1767 adv = hci_find_adv_instance(hdev, instance);
1768
1769 /* If advertisement doesn't exist, we can't modify its data */
1770 if (!adv)
1771 return -ENOENT;
1772
1773 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1774 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1775 memcpy(adv->adv_data, adv_data, adv_data_len);
1776 adv->adv_data_len = adv_data_len;
1777 adv->adv_data_changed = true;
1778 }
1779
1780 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1781 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1782 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1783 adv->scan_rsp_len = scan_rsp_len;
1784 adv->scan_rsp_changed = true;
1785 }
1786
1787 /* Mark as changed if there are flags which would affect it */
1788 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1789 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1790 adv->scan_rsp_changed = true;
1791
1792 return 0;
1793 }
1794
1795 /* This function requires the caller holds hdev->lock */
1796 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1797 {
1798 u32 flags;
1799 struct adv_info *adv;
1800
1801 if (instance == 0x00) {
1802 /* Instance 0 always manages the "Tx Power" and "Flags"
1803 * fields
1804 */
1805 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1806
1807 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1808 * corresponds to the "connectable" instance flag.
1809 */
1810 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1811 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1812
1813 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1814 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1815 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1816 flags |= MGMT_ADV_FLAG_DISCOV;
1817
1818 return flags;
1819 }
1820
1821 adv = hci_find_adv_instance(hdev, instance);
1822
1823 /* Return 0 when we get an invalid instance identifier. */
1824 if (!adv)
1825 return 0;
1826
1827 return adv->flags;
1828 }
1829
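/* An instance is scannable when it carries scan response data, either
 * explicitly or implicitly through the appearance/local name flags.
 */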
1830 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1831 {
1832 struct adv_info *adv;
1833
1834 /* Instance 0x00 always sets the local name */
1835 if (instance == 0x00)
1836 return true;
1837
1838 adv = hci_find_adv_instance(hdev, instance);
1839 if (!adv)
1840 return false;
1841
1842 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1843 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1844 return true;
1845
1846 return adv->scan_rsp_len ? true : false;
1847 }
1848
1849 /* This function requires the caller holds hdev->lock */
1850 void hci_adv_monitors_clear(struct hci_dev *hdev)
1851 {
1852 struct adv_monitor *monitor;
1853 int handle;
1854
1855 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1856 hci_free_adv_monitor(hdev, monitor);
1857
1858 idr_destroy(&hdev->adv_monitors_idr);
1859 }
1860
1861 /* Frees the monitor structure and does some bookkeeping.
1862 * This function requires the caller holds hdev->lock.
1863 */
1864 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1865 {
1866 struct adv_pattern *pattern;
1867 struct adv_pattern *tmp;
1868
1869 if (!monitor)
1870 return;
1871
1872 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1873 list_del(&pattern->list);
1874 kfree(pattern);
1875 }
1876
1877 if (monitor->handle)
1878 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1879
1880 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1881 hdev->adv_monitors_cnt--;
1882
1883 kfree(monitor);
1884 }
1885
1886 /* Assigns a handle to a monitor and, if offloading is supported and power is on,
1887 * also attempts to forward the request to the controller.
1888 * This function requires the caller holds hci_req_sync_lock.
1889 */
1890 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1891 {
1892 int min, max, handle;
1893 int status = 0;
1894
1895 if (!monitor)
1896 return -EINVAL;
1897
1898 hci_dev_lock(hdev);
1899
1900 min = HCI_MIN_ADV_MONITOR_HANDLE;
1901 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1902 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1903 GFP_KERNEL);
1904
1905 hci_dev_unlock(hdev);
1906
1907 if (handle < 0)
1908 return handle;
1909
1910 monitor->handle = handle;
1911
1912 if (!hdev_is_powered(hdev))
1913 return status;
1914
1915 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1916 case HCI_ADV_MONITOR_EXT_NONE:
1917 bt_dev_dbg(hdev, "add monitor %d status %d",
1918 monitor->handle, status);
1919 /* Message was not forwarded to controller - not an error */
1920 break;
1921
1922 case HCI_ADV_MONITOR_EXT_MSFT:
1923 status = msft_add_monitor_pattern(hdev, monitor);
1924 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1925 handle, status);
1926 break;
1927 }
1928
1929 return status;
1930 }
1931
1932 /* Attempts to tell the controller to remove the monitor and then frees it.
1933 * If the controller doesn't have a corresponding handle, it is freed anyway.
1934 * This function requires the caller holds hci_req_sync_lock.
1935 */
1936 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1937 struct adv_monitor *monitor)
1938 {
1939 int status = 0;
1940 int handle;
1941
1942 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1943 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1944 bt_dev_dbg(hdev, "remove monitor %d status %d",
1945 monitor->handle, status);
1946 goto free_monitor;
1947
1948 case HCI_ADV_MONITOR_EXT_MSFT:
1949 handle = monitor->handle;
1950 status = msft_remove_monitor(hdev, monitor);
1951 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1952 handle, status);
1953 break;
1954 }
1955
1956 /* In case no matching handle is registered, just free the monitor */
1957 if (status == -ENOENT)
1958 goto free_monitor;
1959
1960 return status;
1961
1962 free_monitor:
1963 if (status == -ENOENT)
1964 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1965 monitor->handle);
1966 hci_free_adv_monitor(hdev, monitor);
1967
1968 return status;
1969 }
1970
1971 /* This function requires the caller holds hci_req_sync_lock */
1972 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1973 {
1974 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1975
1976 if (!monitor)
1977 return -EINVAL;
1978
1979 return hci_remove_adv_monitor(hdev, monitor);
1980 }
1981
1982 /* This function requires the caller holds hci_req_sync_lock */
1983 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1984 {
1985 struct adv_monitor *monitor;
1986 int idr_next_id = 0;
1987 int status = 0;
1988
1989 while (1) {
1990 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1991 if (!monitor)
1992 break;
1993
1994 status = hci_remove_adv_monitor(hdev, monitor);
1995 if (status)
1996 return status;
1997
1998 idr_next_id++;
1999 }
2000
2001 return status;
2002 }
2003
2004 /* This function requires the caller holds hdev->lock */
2005 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2006 {
2007 return !idr_is_empty(&hdev->adv_monitors_idr);
2008 }
2009
2010 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2011 {
2012 if (msft_monitor_supported(hdev))
2013 return HCI_ADV_MONITOR_EXT_MSFT;
2014
2015 return HCI_ADV_MONITOR_EXT_NONE;
2016 }
2017
2018 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2019 bdaddr_t *bdaddr, u8 type)
2020 {
2021 struct bdaddr_list *b;
2022
2023 list_for_each_entry(b, bdaddr_list, list) {
2024 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2025 return b;
2026 }
2027
2028 return NULL;
2029 }
2030
2031 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2032 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2033 u8 type)
2034 {
2035 struct bdaddr_list_with_irk *b;
2036
2037 list_for_each_entry(b, bdaddr_list, list) {
2038 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2039 return b;
2040 }
2041
2042 return NULL;
2043 }
2044
2045 struct bdaddr_list_with_flags *
2046 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2047 bdaddr_t *bdaddr, u8 type)
2048 {
2049 struct bdaddr_list_with_flags *b;
2050
2051 list_for_each_entry(b, bdaddr_list, list) {
2052 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2053 return b;
2054 }
2055
2056 return NULL;
2057 }
2058
2059 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2060 {
2061 struct bdaddr_list *b, *n;
2062
2063 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2064 list_del(&b->list);
2065 kfree(b);
2066 }
2067 }
2068
2069 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2070 {
2071 struct bdaddr_list *entry;
2072
2073 if (!bacmp(bdaddr, BDADDR_ANY))
2074 return -EBADF;
2075
2076 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2077 return -EEXIST;
2078
2079 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2080 if (!entry)
2081 return -ENOMEM;
2082
2083 bacpy(&entry->bdaddr, bdaddr);
2084 entry->bdaddr_type = type;
2085
2086 list_add(&entry->list, list);
2087
2088 return 0;
2089 }
2090
2091 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2092 u8 type, u8 *peer_irk, u8 *local_irk)
2093 {
2094 struct bdaddr_list_with_irk *entry;
2095
2096 if (!bacmp(bdaddr, BDADDR_ANY))
2097 return -EBADF;
2098
2099 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2100 return -EEXIST;
2101
2102 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2103 if (!entry)
2104 return -ENOMEM;
2105
2106 bacpy(&entry->bdaddr, bdaddr);
2107 entry->bdaddr_type = type;
2108
2109 if (peer_irk)
2110 memcpy(entry->peer_irk, peer_irk, 16);
2111
2112 if (local_irk)
2113 memcpy(entry->local_irk, local_irk, 16);
2114
2115 list_add(&entry->list, list);
2116
2117 return 0;
2118 }
2119
2120 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2121 u8 type, u32 flags)
2122 {
2123 struct bdaddr_list_with_flags *entry;
2124
2125 if (!bacmp(bdaddr, BDADDR_ANY))
2126 return -EBADF;
2127
2128 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2129 return -EEXIST;
2130
2131 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2132 if (!entry)
2133 return -ENOMEM;
2134
2135 bacpy(&entry->bdaddr, bdaddr);
2136 entry->bdaddr_type = type;
2137 entry->flags = flags;
2138
2139 list_add(&entry->list, list);
2140
2141 return 0;
2142 }
2143
2144 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2145 {
2146 struct bdaddr_list *entry;
2147
2148 if (!bacmp(bdaddr, BDADDR_ANY)) {
2149 hci_bdaddr_list_clear(list);
2150 return 0;
2151 }
2152
2153 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2154 if (!entry)
2155 return -ENOENT;
2156
2157 list_del(&entry->list);
2158 kfree(entry);
2159
2160 return 0;
2161 }
2162
2163 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2164 u8 type)
2165 {
2166 struct bdaddr_list_with_irk *entry;
2167
2168 if (!bacmp(bdaddr, BDADDR_ANY)) {
2169 hci_bdaddr_list_clear(list);
2170 return 0;
2171 }
2172
2173 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2174 if (!entry)
2175 return -ENOENT;
2176
2177 list_del(&entry->list);
2178 kfree(entry);
2179
2180 return 0;
2181 }
2182
2183 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2184 u8 type)
2185 {
2186 struct bdaddr_list_with_flags *entry;
2187
2188 if (!bacmp(bdaddr, BDADDR_ANY)) {
2189 hci_bdaddr_list_clear(list);
2190 return 0;
2191 }
2192
2193 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2194 if (!entry)
2195 return -ENOENT;
2196
2197 list_del(&entry->list);
2198 kfree(entry);
2199
2200 return 0;
2201 }
2202
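/* Editor's illustrative sketch (not part of the original source): typical use
 * of the bdaddr list helpers above. The peer address value and the choice of
 * hdev->accept_list are assumptions for the example only.
 */
static int example_accept_list_usage(struct hci_dev *hdev)
{
	bdaddr_t peer = {{ 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 }};
	int err;

	err = hci_bdaddr_list_add(&hdev->accept_list, &peer, BDADDR_BREDR);
	if (err && err != -EEXIST)
		return err;

	if (hci_bdaddr_list_lookup(&hdev->accept_list, &peer, BDADDR_BREDR))
		err = hci_bdaddr_list_del(&hdev->accept_list, &peer,
					  BDADDR_BREDR);

	return err;
}
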
2203 /* This function requires the caller holds hdev->lock */
2204 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2205 bdaddr_t *addr, u8 addr_type)
2206 {
2207 struct hci_conn_params *params;
2208
2209 list_for_each_entry(params, &hdev->le_conn_params, list) {
2210 if (bacmp(&params->addr, addr) == 0 &&
2211 params->addr_type == addr_type) {
2212 return params;
2213 }
2214 }
2215
2216 return NULL;
2217 }
2218
2219 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2220 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2221 bdaddr_t *addr, u8 addr_type)
2222 {
2223 struct hci_conn_params *param;
2224
2225 rcu_read_lock();
2226
2227 list_for_each_entry_rcu(param, list, action) {
2228 if (bacmp(&param->addr, addr) == 0 &&
2229 param->addr_type == addr_type) {
2230 rcu_read_unlock();
2231 return param;
2232 }
2233 }
2234
2235 rcu_read_unlock();
2236
2237 return NULL;
2238 }
2239
2240 /* This function requires the caller holds hdev->lock */
2241 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2242 {
2243 if (list_empty(&param->action))
2244 return;
2245
2246 list_del_rcu(&param->action);
2247 synchronize_rcu();
2248 INIT_LIST_HEAD(&param->action);
2249 }
2250
2251 /* This function requires the caller holds hdev->lock */
2252 void hci_pend_le_list_add(struct hci_conn_params *param,
2253 struct list_head *list)
2254 {
2255 list_add_rcu(&param->action, list);
2256 }
2257
2258 /* This function requires the caller holds hdev->lock */
2259 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2260 bdaddr_t *addr, u8 addr_type)
2261 {
2262 struct hci_conn_params *params;
2263
2264 params = hci_conn_params_lookup(hdev, addr, addr_type);
2265 if (params)
2266 return params;
2267
2268 params = kzalloc(sizeof(*params), GFP_KERNEL);
2269 if (!params) {
2270 bt_dev_err(hdev, "out of memory");
2271 return NULL;
2272 }
2273
2274 bacpy(&params->addr, addr);
2275 params->addr_type = addr_type;
2276
2277 list_add(&params->list, &hdev->le_conn_params);
2278 INIT_LIST_HEAD(&params->action);
2279
2280 params->conn_min_interval = hdev->le_conn_min_interval;
2281 params->conn_max_interval = hdev->le_conn_max_interval;
2282 params->conn_latency = hdev->le_conn_latency;
2283 params->supervision_timeout = hdev->le_supv_timeout;
2284 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2285
2286 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2287
2288 return params;
2289 }
2290
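/* Editor's illustrative sketch (not from the original source): the usual
 * lookup-or-create pattern built on hci_conn_params_add(). The peer address,
 * the ADDR_LE_DEV_PUBLIC type and the HCI_AUTO_CONN_ALWAYS policy are
 * assumptions for the example; hdev->lock is taken as required by the
 * helpers above.
 */
static void example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
	if (params) {
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	hci_dev_unlock(hdev);
}
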
2291 void hci_conn_params_free(struct hci_conn_params *params)
2292 {
2293 hci_pend_le_list_del_init(params);
2294
2295 if (params->conn) {
2296 hci_conn_drop(params->conn);
2297 hci_conn_put(params->conn);
2298 }
2299
2300 list_del(&params->list);
2301 kfree(params);
2302 }
2303
2304 /* This function requires the caller holds hdev->lock */
2305 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2306 {
2307 struct hci_conn_params *params;
2308
2309 params = hci_conn_params_lookup(hdev, addr, addr_type);
2310 if (!params)
2311 return;
2312
2313 hci_conn_params_free(params);
2314
2315 hci_update_passive_scan(hdev);
2316
2317 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2318 }
2319
2320 /* This function requires the caller holds hdev->lock */
2321 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2322 {
2323 struct hci_conn_params *params, *tmp;
2324
2325 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2326 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2327 continue;
2328
2329 /* If we are trying to establish a one-time connection to a disabled
2330  * device, keep the params but mark them as explicit-connect only.
2331 */
2332 if (params->explicit_connect) {
2333 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2334 continue;
2335 }
2336
2337 hci_conn_params_free(params);
2338 }
2339
2340 BT_DBG("All LE disabled connection parameters were removed");
2341 }
2342
2343 /* This function requires the caller holds hdev->lock */
2344 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2345 {
2346 struct hci_conn_params *params, *tmp;
2347
2348 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2349 hci_conn_params_free(params);
2350
2351 BT_DBG("All LE connection parameters were removed");
2352 }
2353
2354 /* Copy the Identity Address of the controller.
2355 *
2356 * If the controller has a public BD_ADDR, then by default use that one.
2357 * If this is a LE only controller without a public address, default to
2358 * the static random address.
2359 *
2360 * For debugging purposes it is possible to force controllers with a
2361 * public address to use the static random address instead.
2362 *
2363 * In case BR/EDR has been disabled on a dual-mode controller and
2364 * userspace has configured a static address, then that address
2365 * becomes the identity address instead of the public BR/EDR address.
2366 */
2367 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2368 u8 *bdaddr_type)
2369 {
2370 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2371 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2372 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2373 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2374 bacpy(bdaddr, &hdev->static_addr);
2375 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2376 } else {
2377 bacpy(bdaddr, &hdev->bdaddr);
2378 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2379 }
2380 }
2381
2382 static void hci_clear_wake_reason(struct hci_dev *hdev)
2383 {
2384 hci_dev_lock(hdev);
2385
2386 hdev->wake_reason = 0;
2387 bacpy(&hdev->wake_addr, BDADDR_ANY);
2388 hdev->wake_addr_type = 0;
2389
2390 hci_dev_unlock(hdev);
2391 }
2392
2393 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2394 void *data)
2395 {
2396 struct hci_dev *hdev =
2397 container_of(nb, struct hci_dev, suspend_notifier);
2398 int ret = 0;
2399
2400 /* Userspace has full control of this device. Do nothing. */
2401 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2402 return NOTIFY_DONE;
2403
2404 /* To avoid a potential race with hci_unregister_dev. */
2405 hci_dev_hold(hdev);
2406
2407 switch (action) {
2408 case PM_HIBERNATION_PREPARE:
2409 case PM_SUSPEND_PREPARE:
2410 ret = hci_suspend_dev(hdev);
2411 break;
2412 case PM_POST_HIBERNATION:
2413 case PM_POST_SUSPEND:
2414 ret = hci_resume_dev(hdev);
2415 break;
2416 }
2417
2418 if (ret)
2419 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2420 action, ret);
2421
2422 hci_dev_put(hdev);
2423 return NOTIFY_DONE;
2424 }
2425
2426 /* Alloc HCI device */
2427 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2428 {
2429 struct hci_dev *hdev;
2430 unsigned int alloc_size;
2431
2432 alloc_size = sizeof(*hdev);
2433 if (sizeof_priv) {
2434 /* Fixme: May need ALIGN-ment? */
2435 alloc_size += sizeof_priv;
2436 }
2437
2438 hdev = kzalloc(alloc_size, GFP_KERNEL);
2439 if (!hdev)
2440 return NULL;
2441
2442 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2443 hdev->esco_type = (ESCO_HV1);
2444 hdev->link_mode = (HCI_LM_ACCEPT);
2445 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2446 hdev->io_capability = 0x03; /* No Input No Output */
2447 hdev->manufacturer = 0xffff; /* Default to internal use */
2448 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2449 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2450 hdev->adv_instance_cnt = 0;
2451 hdev->cur_adv_instance = 0x00;
2452 hdev->adv_instance_timeout = 0;
2453
2454 hdev->advmon_allowlist_duration = 300;
2455 hdev->advmon_no_filter_duration = 500;
2456 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2457
2458 hdev->sniff_max_interval = 800;
2459 hdev->sniff_min_interval = 80;
2460
2461 hdev->le_adv_channel_map = 0x07;
2462 hdev->le_adv_min_interval = 0x0800;
2463 hdev->le_adv_max_interval = 0x0800;
2464 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2465 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2466 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2467 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2468 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2469 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2470 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2471 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2472 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2473 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2474 hdev->le_conn_min_interval = 0x0018;
2475 hdev->le_conn_max_interval = 0x0028;
2476 hdev->le_conn_latency = 0x0000;
2477 hdev->le_supv_timeout = 0x002a;
2478 hdev->le_def_tx_len = 0x001b;
2479 hdev->le_def_tx_time = 0x0148;
2480 hdev->le_max_tx_len = 0x001b;
2481 hdev->le_max_tx_time = 0x0148;
2482 hdev->le_max_rx_len = 0x001b;
2483 hdev->le_max_rx_time = 0x0148;
2484 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2485 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2486 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2487 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2488 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2489 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2490 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2491 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2492 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2493
2494 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2495 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2496 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2497 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2498 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2499 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2500
2501 /* default 1.28 sec page scan */
2502 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2503 hdev->def_page_scan_int = 0x0800;
2504 hdev->def_page_scan_window = 0x0012;
2505
2506 mutex_init(&hdev->lock);
2507 mutex_init(&hdev->req_lock);
2508
2509 ida_init(&hdev->unset_handle_ida);
2510
2511 INIT_LIST_HEAD(&hdev->mesh_pending);
2512 INIT_LIST_HEAD(&hdev->mgmt_pending);
2513 INIT_LIST_HEAD(&hdev->reject_list);
2514 INIT_LIST_HEAD(&hdev->accept_list);
2515 INIT_LIST_HEAD(&hdev->uuids);
2516 INIT_LIST_HEAD(&hdev->link_keys);
2517 INIT_LIST_HEAD(&hdev->long_term_keys);
2518 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2519 INIT_LIST_HEAD(&hdev->remote_oob_data);
2520 INIT_LIST_HEAD(&hdev->le_accept_list);
2521 INIT_LIST_HEAD(&hdev->le_resolv_list);
2522 INIT_LIST_HEAD(&hdev->le_conn_params);
2523 INIT_LIST_HEAD(&hdev->pend_le_conns);
2524 INIT_LIST_HEAD(&hdev->pend_le_reports);
2525 INIT_LIST_HEAD(&hdev->conn_hash.list);
2526 INIT_LIST_HEAD(&hdev->adv_instances);
2527 INIT_LIST_HEAD(&hdev->blocked_keys);
2528 INIT_LIST_HEAD(&hdev->monitored_devices);
2529
2530 INIT_LIST_HEAD(&hdev->local_codecs);
2531 INIT_WORK(&hdev->rx_work, hci_rx_work);
2532 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2533 INIT_WORK(&hdev->tx_work, hci_tx_work);
2534 INIT_WORK(&hdev->power_on, hci_power_on);
2535 INIT_WORK(&hdev->error_reset, hci_error_reset);
2536
2537 hci_cmd_sync_init(hdev);
2538
2539 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2540
2541 skb_queue_head_init(&hdev->rx_q);
2542 skb_queue_head_init(&hdev->cmd_q);
2543 skb_queue_head_init(&hdev->raw_q);
2544
2545 init_waitqueue_head(&hdev->req_wait_q);
2546
2547 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2548 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2549
2550 hci_devcd_setup(hdev);
2551
2552 hci_init_sysfs(hdev);
2553 discovery_init(hdev);
2554
2555 return hdev;
2556 }
2557 EXPORT_SYMBOL(hci_alloc_dev_priv);
2558
2559 /* Free HCI device */
2560 void hci_free_dev(struct hci_dev *hdev)
2561 {
2562 /* will free via device release */
2563 put_device(&hdev->dev);
2564 }
2565 EXPORT_SYMBOL(hci_free_dev);
2566
2567 /* Register HCI device */
2568 int hci_register_dev(struct hci_dev *hdev)
2569 {
2570 int id, error;
2571
2572 if (!hdev->open || !hdev->close || !hdev->send)
2573 return -EINVAL;
2574
2575 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2576 if (id < 0)
2577 return id;
2578
2579 error = dev_set_name(&hdev->dev, "hci%u", id);
2580 if (error)
2581 return error;
2582
2583 hdev->name = dev_name(&hdev->dev);
2584 hdev->id = id;
2585
2586 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2587
2588 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2589 if (!hdev->workqueue) {
2590 error = -ENOMEM;
2591 goto err;
2592 }
2593
2594 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2595 hdev->name);
2596 if (!hdev->req_workqueue) {
2597 destroy_workqueue(hdev->workqueue);
2598 error = -ENOMEM;
2599 goto err;
2600 }
2601
2602 if (!IS_ERR_OR_NULL(bt_debugfs))
2603 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2604
2605 error = device_add(&hdev->dev);
2606 if (error < 0)
2607 goto err_wqueue;
2608
2609 hci_leds_init(hdev);
2610
2611 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2612 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2613 hdev);
2614 if (hdev->rfkill) {
2615 if (rfkill_register(hdev->rfkill) < 0) {
2616 rfkill_destroy(hdev->rfkill);
2617 hdev->rfkill = NULL;
2618 }
2619 }
2620
2621 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2622 hci_dev_set_flag(hdev, HCI_RFKILLED);
2623
2624 hci_dev_set_flag(hdev, HCI_SETUP);
2625 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2626
2627 /* Assume BR/EDR support until proven otherwise (such as
2628  * through reading supported features during init).
2629 */
2630 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2631
2632 write_lock(&hci_dev_list_lock);
2633 list_add(&hdev->list, &hci_dev_list);
2634 write_unlock(&hci_dev_list_lock);
2635
2636 /* Devices that are marked for raw-only usage are unconfigured
2637 * and should not be included in normal operation.
2638 */
2639 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2640 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2641
2642 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2643 * callback.
2644 */
2645 if (hdev->wakeup)
2646 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2647
2648 hci_sock_dev_event(hdev, HCI_DEV_REG);
2649 hci_dev_hold(hdev);
2650
2651 error = hci_register_suspend_notifier(hdev);
2652 if (error)
2653 BT_WARN("register suspend notifier failed error:%d\n", error);
2654
2655 queue_work(hdev->req_workqueue, &hdev->power_on);
2656
2657 idr_init(&hdev->adv_monitors_idr);
2658 msft_register(hdev);
2659
2660 return id;
2661
2662 err_wqueue:
2663 debugfs_remove_recursive(hdev->debugfs);
2664 destroy_workqueue(hdev->workqueue);
2665 destroy_workqueue(hdev->req_workqueue);
2666 err:
2667 ida_free(&hci_index_ida, hdev->id);
2668
2669 return error;
2670 }
2671 EXPORT_SYMBOL(hci_register_dev);
2672
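/* Editor's illustrative sketch (not from the original source): the driver
 * side of registration. The my_open/my_close/my_send callbacks, the my_priv
 * data and the HCI_VIRTUAL bus type are assumptions for the example. Note
 * that hci_register_dev() above rejects an hdev without open, close and send
 * callbacks.
 */
static int example_driver_probe(void *my_priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->send  = my_send;
	hci_set_drvdata(hdev, my_priv);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
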
2673 /* Unregister HCI device */
2674 void hci_unregister_dev(struct hci_dev *hdev)
2675 {
2676 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2677
2678 mutex_lock(&hdev->unregister_lock);
2679 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2680 mutex_unlock(&hdev->unregister_lock);
2681
2682 write_lock(&hci_dev_list_lock);
2683 list_del(&hdev->list);
2684 write_unlock(&hci_dev_list_lock);
2685
2686 disable_work_sync(&hdev->rx_work);
2687 disable_work_sync(&hdev->cmd_work);
2688 disable_work_sync(&hdev->tx_work);
2689 disable_work_sync(&hdev->power_on);
2690 disable_work_sync(&hdev->error_reset);
2691
2692 hci_cmd_sync_clear(hdev);
2693
2694 hci_unregister_suspend_notifier(hdev);
2695
2696 hci_dev_do_close(hdev);
2697
2698 if (!test_bit(HCI_INIT, &hdev->flags) &&
2699 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2700 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2701 hci_dev_lock(hdev);
2702 mgmt_index_removed(hdev);
2703 hci_dev_unlock(hdev);
2704 }
2705
2706 /* mgmt_index_removed should take care of emptying the
2707 * pending list */
2708 BUG_ON(!list_empty(&hdev->mgmt_pending));
2709
2710 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2711
2712 if (hdev->rfkill) {
2713 rfkill_unregister(hdev->rfkill);
2714 rfkill_destroy(hdev->rfkill);
2715 }
2716
2717 device_del(&hdev->dev);
2718 /* Actual cleanup is deferred until hci_release_dev(). */
2719 hci_dev_put(hdev);
2720 }
2721 EXPORT_SYMBOL(hci_unregister_dev);
2722
2723 /* Release HCI device */
2724 void hci_release_dev(struct hci_dev *hdev)
2725 {
2726 debugfs_remove_recursive(hdev->debugfs);
2727 kfree_const(hdev->hw_info);
2728 kfree_const(hdev->fw_info);
2729
2730 destroy_workqueue(hdev->workqueue);
2731 destroy_workqueue(hdev->req_workqueue);
2732
2733 hci_dev_lock(hdev);
2734 hci_bdaddr_list_clear(&hdev->reject_list);
2735 hci_bdaddr_list_clear(&hdev->accept_list);
2736 hci_uuids_clear(hdev);
2737 hci_link_keys_clear(hdev);
2738 hci_smp_ltks_clear(hdev);
2739 hci_smp_irks_clear(hdev);
2740 hci_remote_oob_data_clear(hdev);
2741 hci_adv_instances_clear(hdev);
2742 hci_adv_monitors_clear(hdev);
2743 hci_bdaddr_list_clear(&hdev->le_accept_list);
2744 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2745 hci_conn_params_clear_all(hdev);
2746 hci_discovery_filter_clear(hdev);
2747 hci_blocked_keys_clear(hdev);
2748 hci_codec_list_clear(&hdev->local_codecs);
2749 msft_release(hdev);
2750 hci_dev_unlock(hdev);
2751
2752 ida_destroy(&hdev->unset_handle_ida);
2753 ida_free(&hci_index_ida, hdev->id);
2754 kfree_skb(hdev->sent_cmd);
2755 kfree_skb(hdev->req_skb);
2756 kfree_skb(hdev->recv_event);
2757 kfree(hdev);
2758 }
2759 EXPORT_SYMBOL(hci_release_dev);
2760
2761 int hci_register_suspend_notifier(struct hci_dev *hdev)
2762 {
2763 int ret = 0;
2764
2765 if (!hdev->suspend_notifier.notifier_call &&
2766 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2767 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2768 ret = register_pm_notifier(&hdev->suspend_notifier);
2769 }
2770
2771 return ret;
2772 }
2773
2774 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2775 {
2776 int ret = 0;
2777
2778 if (hdev->suspend_notifier.notifier_call) {
2779 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2780 if (!ret)
2781 hdev->suspend_notifier.notifier_call = NULL;
2782 }
2783
2784 return ret;
2785 }
2786
2787 /* Cancel ongoing command synchronously:
2788 *
2789 * - Cancel command timer
2790 * - Reset command counter
2791 * - Cancel command request
2792 */
2793 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2794 {
2795 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2796
2797 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2798 disable_delayed_work_sync(&hdev->cmd_timer);
2799 disable_delayed_work_sync(&hdev->ncmd_timer);
2800 } else {
2801 cancel_delayed_work_sync(&hdev->cmd_timer);
2802 cancel_delayed_work_sync(&hdev->ncmd_timer);
2803 }
2804
2805 atomic_set(&hdev->cmd_cnt, 1);
2806
2807 hci_cmd_sync_cancel_sync(hdev, err);
2808 }
2809
2810 /* Suspend HCI device */
2811 int hci_suspend_dev(struct hci_dev *hdev)
2812 {
2813 int ret;
2814
2815 bt_dev_dbg(hdev, "");
2816
2817 /* Suspend should only act when the device is powered. */
2818 if (!hdev_is_powered(hdev) ||
2819 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2820 return 0;
2821
2822 /* If powering down don't attempt to suspend */
2823 if (mgmt_powering_down(hdev))
2824 return 0;
2825
2826 /* Cancel potentially blocking sync operation before suspend */
2827 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2828
2829 hci_req_sync_lock(hdev);
2830 ret = hci_suspend_sync(hdev);
2831 hci_req_sync_unlock(hdev);
2832
2833 hci_clear_wake_reason(hdev);
2834 mgmt_suspending(hdev, hdev->suspend_state);
2835
2836 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2837 return ret;
2838 }
2839 EXPORT_SYMBOL(hci_suspend_dev);
2840
2841 /* Resume HCI device */
2842 int hci_resume_dev(struct hci_dev *hdev)
2843 {
2844 int ret;
2845
2846 bt_dev_dbg(hdev, "");
2847
2848 /* Resume should only act when the device is powered. */
2849 if (!hdev_is_powered(hdev) ||
2850 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2851 return 0;
2852
2853 /* If powering down don't attempt to resume */
2854 if (mgmt_powering_down(hdev))
2855 return 0;
2856
2857 hci_req_sync_lock(hdev);
2858 ret = hci_resume_sync(hdev);
2859 hci_req_sync_unlock(hdev);
2860
2861 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2862 hdev->wake_addr_type);
2863
2864 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2865 return ret;
2866 }
2867 EXPORT_SYMBOL(hci_resume_dev);
2868
2869 /* Reset HCI device */
2870 int hci_reset_dev(struct hci_dev *hdev)
2871 {
2872 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2873 struct sk_buff *skb;
2874
2875 skb = bt_skb_alloc(3, GFP_ATOMIC);
2876 if (!skb)
2877 return -ENOMEM;
2878
2879 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2880 skb_put_data(skb, hw_err, 3);
2881
2882 bt_dev_err(hdev, "Injecting HCI hardware error event");
2883
2884 /* Send Hardware Error to upper stack */
2885 return hci_recv_frame(hdev, skb);
2886 }
2887 EXPORT_SYMBOL(hci_reset_dev);
2888
2889 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2890 {
2891 if (hdev->classify_pkt_type)
2892 return hdev->classify_pkt_type(hdev, skb);
2893
2894 return hci_skb_pkt_type(skb);
2895 }
2896
2897 /* Receive frame from HCI drivers */
2898 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2899 {
2900 u8 dev_pkt_type;
2901
2902 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2903 && !test_bit(HCI_INIT, &hdev->flags))) {
2904 kfree_skb(skb);
2905 return -ENXIO;
2906 }
2907
2908 /* Check if the driver agrees with the packet type classification */
2909 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2910 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2911 hci_skb_pkt_type(skb) = dev_pkt_type;
2912 }
2913
2914 switch (hci_skb_pkt_type(skb)) {
2915 case HCI_EVENT_PKT:
2916 break;
2917 case HCI_ACLDATA_PKT:
2918 /* Detect if ISO packet has been sent as ACL */
2919 if (hci_conn_num(hdev, ISO_LINK)) {
2920 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2921 __u8 type;
2922
2923 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2924 if (type == ISO_LINK)
2925 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2926 }
2927 break;
2928 case HCI_SCODATA_PKT:
2929 break;
2930 case HCI_ISODATA_PKT:
2931 break;
2932 case HCI_DRV_PKT:
2933 break;
2934 default:
2935 kfree_skb(skb);
2936 return -EINVAL;
2937 }
2938
2939 /* Incoming skb */
2940 bt_cb(skb)->incoming = 1;
2941
2942 /* Time stamp */
2943 __net_timestamp(skb);
2944
2945 skb_queue_tail(&hdev->rx_q, skb);
2946 queue_work(hdev->workqueue, &hdev->rx_work);
2947
2948 return 0;
2949 }
2950 EXPORT_SYMBOL(hci_recv_frame);
2951
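/* Editor's illustrative sketch (not from the original source): how a
 * transport driver hands a received packet to the core. The buf/count values
 * come from a hypothetical transport. hci_recv_frame() consumes the skb even
 * on failure, so the caller must not free it again.
 */
static int example_driver_rx(struct hci_dev *hdev, const void *buf,
			     size_t count)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Packet indicator as read from the transport */
	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, buf, count);

	return hci_recv_frame(hdev, skb);
}
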
2952 /* Receive diagnostic message from HCI drivers */
2953 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2954 {
2955 /* Mark as diagnostic packet */
2956 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2957
2958 /* Time stamp */
2959 __net_timestamp(skb);
2960
2961 skb_queue_tail(&hdev->rx_q, skb);
2962 queue_work(hdev->workqueue, &hdev->rx_work);
2963
2964 return 0;
2965 }
2966 EXPORT_SYMBOL(hci_recv_diag);
2967
2968 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2969 {
2970 va_list vargs;
2971
2972 va_start(vargs, fmt);
2973 kfree_const(hdev->hw_info);
2974 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2975 va_end(vargs);
2976 }
2977 EXPORT_SYMBOL(hci_set_hw_info);
2978
2979 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2980 {
2981 va_list vargs;
2982
2983 va_start(vargs, fmt);
2984 kfree_const(hdev->fw_info);
2985 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2986 va_end(vargs);
2987 }
2988 EXPORT_SYMBOL(hci_set_fw_info);
2989
2990 /* ---- Interface to upper protocols ---- */
2991
2992 int hci_register_cb(struct hci_cb *cb)
2993 {
2994 BT_DBG("%p name %s", cb, cb->name);
2995
2996 mutex_lock(&hci_cb_list_lock);
2997 list_add_tail(&cb->list, &hci_cb_list);
2998 mutex_unlock(&hci_cb_list_lock);
2999
3000 return 0;
3001 }
3002 EXPORT_SYMBOL(hci_register_cb);
3003
3004 int hci_unregister_cb(struct hci_cb *cb)
3005 {
3006 BT_DBG("%p name %s", cb, cb->name);
3007
3008 mutex_lock(&hci_cb_list_lock);
3009 list_del(&cb->list);
3010 mutex_unlock(&hci_cb_list_lock);
3011
3012 return 0;
3013 }
3014 EXPORT_SYMBOL(hci_unregister_cb);
3015
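/* Editor's illustrative sketch (not from the original source): how an upper
 * protocol hooks into the callback list above. The "my_proto" name and the
 * my_connect_cfm handler are hypothetical.
 */
static struct hci_cb example_proto_cb = {
	.name		= "my_proto",
	.connect_cfm	= my_connect_cfm,
};

static int example_proto_init(void)
{
	return hci_register_cb(&example_proto_cb);
}

static void example_proto_exit(void)
{
	hci_unregister_cb(&example_proto_cb);
}
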
3016 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3017 {
3018 int err;
3019
3020 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3021 skb->len);
3022
3023 /* Time stamp */
3024 __net_timestamp(skb);
3025
3026 /* Send copy to monitor */
3027 hci_send_to_monitor(hdev, skb);
3028
3029 if (atomic_read(&hdev->promisc)) {
3030 /* Send copy to the sockets */
3031 hci_send_to_sock(hdev, skb);
3032 }
3033
3034 /* Get rid of skb owner, prior to sending to the driver. */
3035 skb_orphan(skb);
3036
3037 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3038 kfree_skb(skb);
3039 return -EINVAL;
3040 }
3041
3042 if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
3043 /* Intercept HCI Drv packet here and don't go with hdev->send
3044 * callback.
3045 */
3046 err = hci_drv_process_cmd(hdev, skb);
3047 kfree_skb(skb);
3048 return err;
3049 }
3050
3051 err = hdev->send(hdev, skb);
3052 if (err < 0) {
3053 bt_dev_err(hdev, "sending frame failed (%d)", err);
3054 kfree_skb(skb);
3055 return err;
3056 }
3057
3058 return 0;
3059 }
3060
3061 /* Send HCI command */
3062 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3063 const void *param)
3064 {
3065 struct sk_buff *skb;
3066
3067 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3068
3069 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3070 if (!skb) {
3071 bt_dev_err(hdev, "no memory for command");
3072 return -ENOMEM;
3073 }
3074
3075 /* Stand-alone HCI commands must be flagged as
3076 * single-command requests.
3077 */
3078 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3079
3080 skb_queue_tail(&hdev->cmd_q, skb);
3081 queue_work(hdev->workqueue, &hdev->cmd_work);
3082
3083 return 0;
3084 }
3085
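/* Editor's illustrative sketch (not from the original source): queueing a
 * parameterless standard command; completion is handled asynchronously by the
 * HCI event path.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
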
3086 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3087 const void *param)
3088 {
3089 struct sk_buff *skb;
3090
3091 if (hci_opcode_ogf(opcode) != 0x3f) {
3092 /* A controller receiving a command shall respond with either
3093 * a Command Status Event or a Command Complete Event.
3094 * Therefore, all standard HCI commands must be sent via the
3095 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3096 * Some vendors do not comply with this rule for vendor-specific
3097 * commands and do not return any event. We want to support
3098 * unresponded commands for such cases only.
3099 */
3100 bt_dev_err(hdev, "unresponded command not supported");
3101 return -EINVAL;
3102 }
3103
3104 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3105 if (!skb) {
3106 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3107 opcode);
3108 return -ENOMEM;
3109 }
3110
3111 hci_send_frame(hdev, skb);
3112
3113 return 0;
3114 }
3115 EXPORT_SYMBOL(__hci_cmd_send);
3116
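/* Editor's illustrative sketch (not from the original source): the vendor
 * command case described above, where the controller is known to send no
 * completion event. The opcode 0xfc01 (OGF 0x3f) and its single parameter
 * byte are made-up example values.
 */
static int example_vendor_cmd(struct hci_dev *hdev)
{
	static const u8 param = 0x01;

	return __hci_cmd_send(hdev, 0xfc01, sizeof(param), &param);
}
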
3117 /* Get data from the previously sent command */
3118 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3119 {
3120 struct hci_command_hdr *hdr;
3121
3122 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3123 return NULL;
3124
3125 hdr = (void *)skb->data;
3126
3127 if (hdr->opcode != cpu_to_le16(opcode))
3128 return NULL;
3129
3130 return skb->data + HCI_COMMAND_HDR_SIZE;
3131 }
3132
3133 /* Get data from the previously sent command */
3134 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3135 {
3136 void *data;
3137
3138 /* Check if opcode matches last sent command */
3139 data = hci_cmd_data(hdev->sent_cmd, opcode);
3140 if (!data)
3141 /* Check if opcode matches last request */
3142 data = hci_cmd_data(hdev->req_skb, opcode);
3143
3144 return data;
3145 }
3146
3147 /* Get data from last received event */
3148 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3149 {
3150 struct hci_event_hdr *hdr;
3151 int offset;
3152
3153 if (!hdev->recv_event)
3154 return NULL;
3155
3156 hdr = (void *)hdev->recv_event->data;
3157 offset = sizeof(*hdr);
3158
3159 if (hdr->evt != event) {
3160 /* In case of an LE meta event, check whether the subevent matches */
3161 if (hdr->evt == HCI_EV_LE_META) {
3162 struct hci_ev_le_meta *ev;
3163
3164 ev = (void *)hdev->recv_event->data + offset;
3165 offset += sizeof(*ev);
3166 if (ev->subevent == event)
3167 goto found;
3168 }
3169 return NULL;
3170 }
3171
3172 found:
3173 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3174
3175 return hdev->recv_event->data + offset;
3176 }
3177
3178 /* Send ACL data */
3179 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3180 {
3181 struct hci_acl_hdr *hdr;
3182 int len = skb->len;
3183
3184 skb_push(skb, HCI_ACL_HDR_SIZE);
3185 skb_reset_transport_header(skb);
3186 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3187 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3188 hdr->dlen = cpu_to_le16(len);
3189 }
3190
3191 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3192 struct sk_buff *skb, __u16 flags)
3193 {
3194 struct hci_conn *conn = chan->conn;
3195 struct hci_dev *hdev = conn->hdev;
3196 struct sk_buff *list;
3197
3198 skb->len = skb_headlen(skb);
3199 skb->data_len = 0;
3200
3201 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3202
3203 hci_add_acl_hdr(skb, conn->handle, flags);
3204
3205 list = skb_shinfo(skb)->frag_list;
3206 if (!list) {
3207 /* Non fragmented */
3208 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3209
3210 skb_queue_tail(queue, skb);
3211 } else {
3212 /* Fragmented */
3213 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3214
3215 skb_shinfo(skb)->frag_list = NULL;
3216
3217 /* Queue all fragments atomically. We need to use spin_lock_bh
3218 * here because of 6LoWPAN links, as there this function is
3219 * called from softirq and using normal spin lock could cause
3220 * deadlocks.
3221 */
3222 spin_lock_bh(&queue->lock);
3223
3224 __skb_queue_tail(queue, skb);
3225
3226 flags &= ~ACL_START;
3227 flags |= ACL_CONT;
3228 do {
3229 skb = list; list = list->next;
3230
3231 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3232 hci_add_acl_hdr(skb, conn->handle, flags);
3233
3234 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3235
3236 __skb_queue_tail(queue, skb);
3237 } while (list);
3238
3239 spin_unlock_bh(&queue->lock);
3240 }
3241 }
3242
3243 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3244 {
3245 struct hci_dev *hdev = chan->conn->hdev;
3246
3247 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3248
3249 hci_queue_acl(chan, &chan->data_q, skb, flags);
3250
3251 queue_work(hdev->workqueue, &hdev->tx_work);
3252 }
3253
3254 /* Send SCO data */
3255 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3256 {
3257 struct hci_dev *hdev = conn->hdev;
3258 struct hci_sco_hdr hdr;
3259
3260 BT_DBG("%s len %d", hdev->name, skb->len);
3261
3262 hdr.handle = cpu_to_le16(conn->handle);
3263 hdr.dlen = skb->len;
3264
3265 skb_push(skb, HCI_SCO_HDR_SIZE);
3266 skb_reset_transport_header(skb);
3267 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3268
3269 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3270
3271 skb_queue_tail(&conn->data_q, skb);
3272 queue_work(hdev->workqueue, &hdev->tx_work);
3273 }
3274
3275 /* Send ISO data */
3276 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3277 {
3278 struct hci_iso_hdr *hdr;
3279 int len = skb->len;
3280
3281 skb_push(skb, HCI_ISO_HDR_SIZE);
3282 skb_reset_transport_header(skb);
3283 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3284 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3285 hdr->dlen = cpu_to_le16(len);
3286 }
3287
3288 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3289 struct sk_buff *skb)
3290 {
3291 struct hci_dev *hdev = conn->hdev;
3292 struct sk_buff *list;
3293 __u16 flags;
3294
3295 skb->len = skb_headlen(skb);
3296 skb->data_len = 0;
3297
3298 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3299
3300 list = skb_shinfo(skb)->frag_list;
3301
3302 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3303 hci_add_iso_hdr(skb, conn->handle, flags);
3304
3305 if (!list) {
3306 /* Non fragmented */
3307 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3308
3309 skb_queue_tail(queue, skb);
3310 } else {
3311 /* Fragmented */
3312 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3313
3314 skb_shinfo(skb)->frag_list = NULL;
3315
3316 __skb_queue_tail(queue, skb);
3317
3318 do {
3319 skb = list; list = list->next;
3320
3321 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3322 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3323 0x00);
3324 hci_add_iso_hdr(skb, conn->handle, flags);
3325
3326 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3327
3328 __skb_queue_tail(queue, skb);
3329 } while (list);
3330 }
3331 }
3332
3333 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3334 {
3335 struct hci_dev *hdev = conn->hdev;
3336
3337 BT_DBG("%s len %d", hdev->name, skb->len);
3338
3339 hci_queue_iso(conn, &conn->data_q, skb);
3340
3341 queue_work(hdev->workqueue, &hdev->tx_work);
3342 }
3343
3344 /* ---- HCI TX task (outgoing data) ---- */
3345
3346 /* HCI Connection scheduler */
3347 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3348 {
3349 struct hci_dev *hdev;
3350 int cnt, q;
3351
3352 if (!conn) {
3353 *quote = 0;
3354 return;
3355 }
3356
3357 hdev = conn->hdev;
3358
3359 switch (conn->type) {
3360 case ACL_LINK:
3361 cnt = hdev->acl_cnt;
3362 break;
3363 case SCO_LINK:
3364 case ESCO_LINK:
3365 cnt = hdev->sco_cnt;
3366 break;
3367 case LE_LINK:
3368 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3369 break;
3370 case ISO_LINK:
3371 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3372 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3373 break;
3374 default:
3375 cnt = 0;
3376 bt_dev_err(hdev, "unknown link type %d", conn->type);
3377 }
3378
3379 q = cnt / num;
3380 *quote = q ? q : 1;
3381 }
3382
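/* Worked example for hci_quote_sent() above (editor's note): with 7 free ACL
 * buffers (cnt) shared by 3 ACL connections that have queued data (num), each
 * connection gets a quote of 7 / 3 == 2 packets per scheduling round; if the
 * division rounds down to 0 (more active connections than free buffers), the
 * quote is still at least 1.
 */
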
3383 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3384 int *quote)
3385 {
3386 struct hci_conn_hash *h = &hdev->conn_hash;
3387 struct hci_conn *conn = NULL, *c;
3388 unsigned int num = 0, min = ~0;
3389
3390 /* We don't have to lock the device here. Connections are always
3391 * added and removed with TX task disabled. */
3392
3393 rcu_read_lock();
3394
3395 list_for_each_entry_rcu(c, &h->list, list) {
3396 if (c->type != type || skb_queue_empty(&c->data_q))
3397 continue;
3398
3399 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3400 continue;
3401
3402 num++;
3403
3404 if (c->sent < min) {
3405 min = c->sent;
3406 conn = c;
3407 }
3408
3409 if (hci_conn_num(hdev, type) == num)
3410 break;
3411 }
3412
3413 rcu_read_unlock();
3414
3415 hci_quote_sent(conn, num, quote);
3416
3417 BT_DBG("conn %p quote %d", conn, *quote);
3418 return conn;
3419 }
3420
3421 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3422 {
3423 struct hci_conn_hash *h = &hdev->conn_hash;
3424 struct hci_conn *c;
3425
3426 bt_dev_err(hdev, "link tx timeout");
3427
3428 hci_dev_lock(hdev);
3429
3430 /* Kill stalled connections */
3431 list_for_each_entry(c, &h->list, list) {
3432 if (c->type == type && c->sent) {
3433 bt_dev_err(hdev, "killing stalled connection %pMR",
3434 &c->dst);
3435 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3436 }
3437 }
3438
3439 hci_dev_unlock(hdev);
3440 }
3441
3442 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3443 int *quote)
3444 {
3445 struct hci_conn_hash *h = &hdev->conn_hash;
3446 struct hci_chan *chan = NULL;
3447 unsigned int num = 0, min = ~0, cur_prio = 0;
3448 struct hci_conn *conn;
3449 int conn_num = 0;
3450
3451 BT_DBG("%s", hdev->name);
3452
3453 rcu_read_lock();
3454
3455 list_for_each_entry_rcu(conn, &h->list, list) {
3456 struct hci_chan *tmp;
3457
3458 if (conn->type != type)
3459 continue;
3460
3461 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3462 continue;
3463
3464 conn_num++;
3465
3466 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3467 struct sk_buff *skb;
3468
3469 if (skb_queue_empty(&tmp->data_q))
3470 continue;
3471
3472 skb = skb_peek(&tmp->data_q);
3473 if (skb->priority < cur_prio)
3474 continue;
3475
3476 if (skb->priority > cur_prio) {
3477 num = 0;
3478 min = ~0;
3479 cur_prio = skb->priority;
3480 }
3481
3482 num++;
3483
3484 if (conn->sent < min) {
3485 min = conn->sent;
3486 chan = tmp;
3487 }
3488 }
3489
3490 if (hci_conn_num(hdev, type) == conn_num)
3491 break;
3492 }
3493
3494 rcu_read_unlock();
3495
3496 if (!chan)
3497 return NULL;
3498
3499 hci_quote_sent(chan->conn, num, quote);
3500
3501 BT_DBG("chan %p quote %d", chan, *quote);
3502 return chan;
3503 }
3504
3505 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3506 {
3507 struct hci_conn_hash *h = &hdev->conn_hash;
3508 struct hci_conn *conn;
3509 int num = 0;
3510
3511 BT_DBG("%s", hdev->name);
3512
3513 rcu_read_lock();
3514
3515 list_for_each_entry_rcu(conn, &h->list, list) {
3516 struct hci_chan *chan;
3517
3518 if (conn->type != type)
3519 continue;
3520
3521 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3522 continue;
3523
3524 num++;
3525
3526 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3527 struct sk_buff *skb;
3528
3529 if (chan->sent) {
3530 chan->sent = 0;
3531 continue;
3532 }
3533
3534 if (skb_queue_empty(&chan->data_q))
3535 continue;
3536
3537 skb = skb_peek(&chan->data_q);
3538 if (skb->priority >= HCI_PRIO_MAX - 1)
3539 continue;
3540
3541 skb->priority = HCI_PRIO_MAX - 1;
3542
3543 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3544 skb->priority);
3545 }
3546
3547 if (hci_conn_num(hdev, type) == num)
3548 break;
3549 }
3550
3551 rcu_read_unlock();
3552
3553 }
3554
3555 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3556 {
3557 unsigned long last_tx;
3558
3559 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3560 return;
3561
3562 switch (type) {
3563 case LE_LINK:
3564 last_tx = hdev->le_last_tx;
3565 break;
3566 default:
3567 last_tx = hdev->acl_last_tx;
3568 break;
3569 }
3570
3571 /* tx timeout must be longer than maximum link supervision timeout
3572 * (40.9 seconds)
3573 */
3574 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3575 hci_link_tx_to(hdev, type);
3576 }
3577
3578 /* Schedule SCO */
3579 static void hci_sched_sco(struct hci_dev *hdev)
3580 {
3581 struct hci_conn *conn;
3582 struct sk_buff *skb;
3583 int quote;
3584
3585 BT_DBG("%s", hdev->name);
3586
3587 if (!hci_conn_num(hdev, SCO_LINK))
3588 return;
3589
3590 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3591 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3592 BT_DBG("skb %p len %d", skb, skb->len);
3593 hci_send_frame(hdev, skb);
3594
3595 conn->sent++;
3596 if (conn->sent == ~0)
3597 conn->sent = 0;
3598 }
3599 }
3600 }
3601
3602 static void hci_sched_esco(struct hci_dev *hdev)
3603 {
3604 struct hci_conn *conn;
3605 struct sk_buff *skb;
3606 int quote;
3607
3608 BT_DBG("%s", hdev->name);
3609
3610 if (!hci_conn_num(hdev, ESCO_LINK))
3611 return;
3612
3613 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3614 &quote))) {
3615 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3616 BT_DBG("skb %p len %d", skb, skb->len);
3617 hci_send_frame(hdev, skb);
3618
3619 conn->sent++;
3620 if (conn->sent == ~0)
3621 conn->sent = 0;
3622 }
3623 }
3624 }
3625
3626 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3627 {
3628 unsigned int cnt = hdev->acl_cnt;
3629 struct hci_chan *chan;
3630 struct sk_buff *skb;
3631 int quote;
3632
3633 __check_timeout(hdev, cnt, ACL_LINK);
3634
3635 while (hdev->acl_cnt &&
3636 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3637 u32 priority = (skb_peek(&chan->data_q))->priority;
3638 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3639 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3640 skb->len, skb->priority);
3641
3642 /* Stop if priority has changed */
3643 if (skb->priority < priority)
3644 break;
3645
3646 skb = skb_dequeue(&chan->data_q);
3647
3648 hci_conn_enter_active_mode(chan->conn,
3649 bt_cb(skb)->force_active);
3650
3651 hci_send_frame(hdev, skb);
3652 hdev->acl_last_tx = jiffies;
3653
3654 hdev->acl_cnt--;
3655 chan->sent++;
3656 chan->conn->sent++;
3657
3658 /* Send pending SCO packets right away */
3659 hci_sched_sco(hdev);
3660 hci_sched_esco(hdev);
3661 }
3662 }
3663
3664 if (cnt != hdev->acl_cnt)
3665 hci_prio_recalculate(hdev, ACL_LINK);
3666 }
3667
3668 static void hci_sched_acl(struct hci_dev *hdev)
3669 {
3670 BT_DBG("%s", hdev->name);
3671
3672 /* No ACL link over BR/EDR controller */
3673 if (!hci_conn_num(hdev, ACL_LINK))
3674 return;
3675
3676 hci_sched_acl_pkt(hdev);
3677 }
3678
3679 static void hci_sched_le(struct hci_dev *hdev)
3680 {
3681 struct hci_chan *chan;
3682 struct sk_buff *skb;
3683 int quote, *cnt, tmp;
3684
3685 BT_DBG("%s", hdev->name);
3686
3687 if (!hci_conn_num(hdev, LE_LINK))
3688 return;
3689
3690 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3691
3692 __check_timeout(hdev, *cnt, LE_LINK);
3693
3694 tmp = *cnt;
3695 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3696 u32 priority = (skb_peek(&chan->data_q))->priority;
3697 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3698 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3699 skb->len, skb->priority);
3700
3701 /* Stop if priority has changed */
3702 if (skb->priority < priority)
3703 break;
3704
3705 skb = skb_dequeue(&chan->data_q);
3706
3707 hci_send_frame(hdev, skb);
3708 hdev->le_last_tx = jiffies;
3709
3710 (*cnt)--;
3711 chan->sent++;
3712 chan->conn->sent++;
3713
3714 /* Send pending SCO packets right away */
3715 hci_sched_sco(hdev);
3716 hci_sched_esco(hdev);
3717 }
3718 }
3719
3720 if (*cnt != tmp)
3721 hci_prio_recalculate(hdev, LE_LINK);
3722 }
3723
3724 /* Schedule CIS */
3725 static void hci_sched_iso(struct hci_dev *hdev)
3726 {
3727 struct hci_conn *conn;
3728 struct sk_buff *skb;
3729 int quote, *cnt;
3730
3731 BT_DBG("%s", hdev->name);
3732
3733 if (!hci_conn_num(hdev, ISO_LINK))
3734 return;
3735
3736 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3737 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3738 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3739 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3740 BT_DBG("skb %p len %d", skb, skb->len);
3741 hci_send_frame(hdev, skb);
3742
3743 conn->sent++;
3744 if (conn->sent == ~0)
3745 conn->sent = 0;
3746 (*cnt)--;
3747 }
3748 }
3749 }
3750
3751 static void hci_tx_work(struct work_struct *work)
3752 {
3753 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3754 struct sk_buff *skb;
3755
3756 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3757 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3758
3759 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3760 /* Schedule queues and send stuff to HCI driver */
3761 hci_sched_sco(hdev);
3762 hci_sched_esco(hdev);
3763 hci_sched_iso(hdev);
3764 hci_sched_acl(hdev);
3765 hci_sched_le(hdev);
3766 }
3767
3768 /* Send next queued raw (unknown type) packet */
3769 while ((skb = skb_dequeue(&hdev->raw_q)))
3770 hci_send_frame(hdev, skb);
3771 }
3772
3773 /* ----- HCI RX task (incoming data processing) ----- */
3774
3775 /* ACL data packet */
3776 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3777 {
3778 struct hci_acl_hdr *hdr;
3779 struct hci_conn *conn;
3780 __u16 handle, flags;
3781
3782 hdr = skb_pull_data(skb, sizeof(*hdr));
3783 if (!hdr) {
3784 bt_dev_err(hdev, "ACL packet too small");
3785 goto drop;
3786 }
3787
3788 handle = __le16_to_cpu(hdr->handle);
3789 flags = hci_flags(handle);
3790 handle = hci_handle(handle);
3791
3792 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3793 handle, flags);
3794
3795 hdev->stat.acl_rx++;
3796
3797 hci_dev_lock(hdev);
3798 conn = hci_conn_hash_lookup_handle(hdev, handle);
3799 hci_dev_unlock(hdev);
3800
3801 if (conn) {
3802 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3803
3804 /* Send to upper protocol */
3805 l2cap_recv_acldata(conn, skb, flags);
3806 return;
3807 } else {
3808 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3809 handle);
3810 }
3811
3812 drop:
3813 kfree_skb(skb);
3814 }
3815
3816 /* SCO data packet */
3817 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3818 {
3819 struct hci_sco_hdr *hdr = (void *) skb->data;
3820 struct hci_conn *conn;
3821 __u16 handle, flags;
3822
3823 skb_pull(skb, HCI_SCO_HDR_SIZE);
3824
3825 handle = __le16_to_cpu(hdr->handle);
3826 flags = hci_flags(handle);
3827 handle = hci_handle(handle);
3828
3829 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3830 handle, flags);
3831
3832 hdev->stat.sco_rx++;
3833
3834 hci_dev_lock(hdev);
3835 conn = hci_conn_hash_lookup_handle(hdev, handle);
3836 hci_dev_unlock(hdev);
3837
3838 if (conn) {
3839 /* Send to upper protocol */
3840 hci_skb_pkt_status(skb) = flags & 0x03;
3841 sco_recv_scodata(conn, skb);
3842 return;
3843 } else {
3844 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3845 handle);
3846 }
3847
3848 kfree_skb(skb);
3849 }
3850
3851 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3852 {
3853 struct hci_iso_hdr *hdr;
3854 struct hci_conn *conn;
3855 __u16 handle, flags;
3856
3857 hdr = skb_pull_data(skb, sizeof(*hdr));
3858 if (!hdr) {
3859 bt_dev_err(hdev, "ISO packet too small");
3860 goto drop;
3861 }
3862
3863 handle = __le16_to_cpu(hdr->handle);
3864 flags = hci_flags(handle);
3865 handle = hci_handle(handle);
3866
3867 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3868 handle, flags);
3869
3870 hci_dev_lock(hdev);
3871 conn = hci_conn_hash_lookup_handle(hdev, handle);
3872 hci_dev_unlock(hdev);
3873
3874 if (!conn) {
3875 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3876 handle);
3877 goto drop;
3878 }
3879
3880 /* Send to upper protocol */
3881 iso_recv(conn, skb, flags);
3882 return;
3883
3884 drop:
3885 kfree_skb(skb);
3886 }
3887
3888 static bool hci_req_is_complete(struct hci_dev *hdev)
3889 {
3890 struct sk_buff *skb;
3891
3892 skb = skb_peek(&hdev->cmd_q);
3893 if (!skb)
3894 return true;
3895
3896 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3897 }
3898
3899 static void hci_resend_last(struct hci_dev *hdev)
3900 {
3901 struct hci_command_hdr *sent;
3902 struct sk_buff *skb;
3903 u16 opcode;
3904
3905 if (!hdev->sent_cmd)
3906 return;
3907
3908 sent = (void *) hdev->sent_cmd->data;
3909 opcode = __le16_to_cpu(sent->opcode);
3910 if (opcode == HCI_OP_RESET)
3911 return;
3912
3913 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3914 if (!skb)
3915 return;
3916
3917 skb_queue_head(&hdev->cmd_q, skb);
3918 queue_work(hdev->workqueue, &hdev->cmd_work);
3919 }
3920
3921 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3922 hci_req_complete_t *req_complete,
3923 hci_req_complete_skb_t *req_complete_skb)
3924 {
3925 struct sk_buff *skb;
3926 unsigned long flags;
3927
3928 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3929
3930 /* If the completed command doesn't match the last one that was
3931 * sent we need to do special handling of it.
3932 */
3933 if (!hci_sent_cmd_data(hdev, opcode)) {
3934 /* Some CSR based controllers generate a spontaneous
3935 * reset complete event during init and any pending
3936 * command will never be completed. In such a case we
3937 * need to resend whatever was the last sent
3938 * command.
3939 */
3940 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3941 hci_resend_last(hdev);
3942
3943 return;
3944 }
3945
3946 /* If we reach this point this event matches the last command sent */
3947 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3948
3949 /* If the command succeeded and there are still more commands in
3950  * this request, the request is not yet complete.
3951 */
3952 if (!status && !hci_req_is_complete(hdev))
3953 return;
3954
3955 skb = hdev->req_skb;
3956
3957 /* If this was the last command in a request the complete
3958 * callback would be found in hdev->req_skb instead of the
3959 * command queue (hdev->cmd_q).
3960 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

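/* Work item that drains hdev->rx_q, which is filled by the driver via
 * hci_recv_frame(). Each frame is mirrored to the monitor (and, in
 * promiscuous mode, to HCI sockets) before being dispatched to the matching
 * packet handler below.
 */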
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet-parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall thread that originally injected the
	 * packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device. While
		 * the device is in HCI_INIT, we still need to process
		 * the packets so that the driver can complete its
		 * setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

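/* Transmit a single command taken from the command queue. A clone of the
 * skb is kept in hdev->sent_cmd so that command complete/status handling
 * can match the controller's response against the last command sent; if
 * cloning fails, the command is put back on cmd_q and the work is
 * rescheduled. HCI_OP_NOP entries are not sent to the controller and do
 * not consume a command credit.
 */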
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
		err = hci_send_frame(hdev, skb);
		if (err < 0) {
			hci_cmd_sync_cancel_sync(hdev, -err);
			return;
		}
		atomic_dec(&hdev->cmd_cnt);
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}
}

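/* Command scheduler. hdev->cmd_cnt tracks how many HCI command packets the
 * controller is currently willing to accept (updated from the
 * Num_HCI_Command_Packets field of Command Complete/Status events), so a
 * queued command is only transmitted when a credit is available. The
 * cmd_timer delayed work acts as a watchdog in case the controller stops
 * responding.
 */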
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}
