1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
64
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
68 /* ---- HCI debugfs entries ---- */
69
70 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
71 size_t count, loff_t *ppos)
72 {
73 struct hci_dev *hdev = file->private_data;
74 char buf[3];
75
76 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
77 buf[1] = '\n';
78 buf[2] = '\0';
79 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
80 }
81
82 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
83 size_t count, loff_t *ppos)
84 {
85 struct hci_dev *hdev = file->private_data;
86 struct sk_buff *skb;
87 char buf[32];
88 size_t buf_size = min(count, (sizeof(buf)-1));
89 bool enable;
90
91 if (!test_bit(HCI_UP, &hdev->flags))
92 return -ENETDOWN;
93
94 if (copy_from_user(buf, user_buf, buf_size))
95 return -EFAULT;
96
97 buf[buf_size] = '\0';
98 if (strtobool(buf, &enable))
99 return -EINVAL;
100
101 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
102 return -EALREADY;
103
104 hci_req_lock(hdev);
105 if (enable)
106 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 else
109 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
110 HCI_CMD_TIMEOUT);
111 hci_req_unlock(hdev);
112
113 if (IS_ERR(skb))
114 return PTR_ERR(skb);
115
116 kfree_skb(skb);
117
118 hci_dev_change_flag(hdev, HCI_DUT_MODE);
119
120 return count;
121 }
122
123 static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128 };
129
130 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
132 {
133 struct hci_dev *hdev = file->private_data;
134 char buf[3];
135
136 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
137 buf[1] = '\n';
138 buf[2] = '\0';
139 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
140 }
141
142 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
143 size_t count, loff_t *ppos)
144 {
145 struct hci_dev *hdev = file->private_data;
146 char buf[32];
147 size_t buf_size = min(count, (sizeof(buf)-1));
148 bool enable;
149 int err;
150
151 if (copy_from_user(buf, user_buf, buf_size))
152 return -EFAULT;
153
154 buf[buf_size] = '\0';
155 if (strtobool(buf, &enable))
156 return -EINVAL;
157
158 /* When the diagnostic flags are not persistent and the transport
159 * is not active, then there is no need for the vendor callback.
160 *
161 * Instead just store the desired value. If needed the setting
162 * will be programmed when the controller gets powered on.
163 */
164 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
165 !test_bit(HCI_RUNNING, &hdev->flags))
166 goto done;
167
168 hci_req_lock(hdev);
169 err = hdev->set_diag(hdev, enable);
170 hci_req_unlock(hdev);
171
172 if (err < 0)
173 return err;
174
175 done:
176 if (enable)
177 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
178 else
179 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
180
181 return count;
182 }
183
184 static const struct file_operations vendor_diag_fops = {
185 .open = simple_open,
186 .read = vendor_diag_read,
187 .write = vendor_diag_write,
188 .llseek = default_llseek,
189 };
190
191 static void hci_debugfs_create_basic(struct hci_dev *hdev)
192 {
193 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
194 &dut_mode_fops);
195
196 if (hdev->set_diag)
197 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
198 &vendor_diag_fops);
199 }
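
/* Illustrative usage of the attributes created above, assuming debugfs is
 * mounted at /sys/kernel/debug and the controller is hci0 (both are
 * assumptions, not guaranteed by this file):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * Writes accept any value understood by strtobool(); reads return 'Y' or 'N'
 * followed by a newline.
 */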
200
201 /* ---- HCI requests ---- */
202
203 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
204 struct sk_buff *skb)
205 {
206 BT_DBG("%s result 0x%2.2x", hdev->name, result);
207
208 if (hdev->req_status == HCI_REQ_PEND) {
209 hdev->req_result = result;
210 hdev->req_status = HCI_REQ_DONE;
211 if (skb)
212 hdev->req_skb = skb_get(skb);
213 wake_up_interruptible(&hdev->req_wait_q);
214 }
215 }
216
217 static void hci_req_cancel(struct hci_dev *hdev, int err)
218 {
219 BT_DBG("%s err 0x%2.2x", hdev->name, err);
220
221 if (hdev->req_status == HCI_REQ_PEND) {
222 hdev->req_result = err;
223 hdev->req_status = HCI_REQ_CANCELED;
224 wake_up_interruptible(&hdev->req_wait_q);
225 }
226 }
227
228 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
229 const void *param, u8 event, u32 timeout)
230 {
231 DECLARE_WAITQUEUE(wait, current);
232 struct hci_request req;
233 struct sk_buff *skb;
234 int err = 0;
235
236 BT_DBG("%s", hdev->name);
237
238 hci_req_init(&req, hdev);
239
240 hci_req_add_ev(&req, opcode, plen, param, event);
241
242 hdev->req_status = HCI_REQ_PEND;
243
244 add_wait_queue(&hdev->req_wait_q, &wait);
245 set_current_state(TASK_INTERRUPTIBLE);
246
247 err = hci_req_run_skb(&req, hci_req_sync_complete);
248 if (err < 0) {
249 remove_wait_queue(&hdev->req_wait_q, &wait);
250 set_current_state(TASK_RUNNING);
251 return ERR_PTR(err);
252 }
253
254 schedule_timeout(timeout);
255
256 remove_wait_queue(&hdev->req_wait_q, &wait);
257
258 if (signal_pending(current))
259 return ERR_PTR(-EINTR);
260
261 switch (hdev->req_status) {
262 case HCI_REQ_DONE:
263 err = -bt_to_errno(hdev->req_result);
264 break;
265
266 case HCI_REQ_CANCELED:
267 err = -hdev->req_result;
268 break;
269
270 default:
271 err = -ETIMEDOUT;
272 break;
273 }
274
275 hdev->req_status = hdev->req_result = 0;
276 skb = hdev->req_skb;
277 hdev->req_skb = NULL;
278
279 BT_DBG("%s end: err %d", hdev->name, err);
280
281 if (err < 0) {
282 kfree_skb(skb);
283 return ERR_PTR(err);
284 }
285
286 if (!skb)
287 return ERR_PTR(-ENODATA);
288
289 return skb;
290 }
291 EXPORT_SYMBOL(__hci_cmd_sync_ev);
292
293 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
294 const void *param, u32 timeout)
295 {
296 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
297 }
298 EXPORT_SYMBOL(__hci_cmd_sync);
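
/* Minimal sketch of a caller, for illustration only; the vendor opcode and
 * the surrounding error handling are hypothetical and not defined here:
 *
 *   struct sk_buff *skb;
 *
 *   skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x0001), 0, NULL,
 *                        HCI_CMD_TIMEOUT);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters (or the event
 * selected via __hci_cmd_sync_ev()) and must be freed by the caller.
 */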
299
300 /* Execute request and wait for completion. */
301 static int __hci_req_sync(struct hci_dev *hdev,
302 void (*func)(struct hci_request *req,
303 unsigned long opt),
304 unsigned long opt, __u32 timeout)
305 {
306 struct hci_request req;
307 DECLARE_WAITQUEUE(wait, current);
308 int err = 0;
309
310 BT_DBG("%s start", hdev->name);
311
312 hci_req_init(&req, hdev);
313
314 hdev->req_status = HCI_REQ_PEND;
315
316 func(&req, opt);
317
318 add_wait_queue(&hdev->req_wait_q, &wait);
319 set_current_state(TASK_INTERRUPTIBLE);
320
321 err = hci_req_run_skb(&req, hci_req_sync_complete);
322 if (err < 0) {
323 hdev->req_status = 0;
324
325 remove_wait_queue(&hdev->req_wait_q, &wait);
326 set_current_state(TASK_RUNNING);
327
328 /* ENODATA means the HCI request command queue is empty.
329 * This can happen when a request with conditionals doesn't
330 * trigger any commands to be sent. This is normal behavior
331 * and should not trigger an error return.
332 */
333 if (err == -ENODATA)
334 return 0;
335
336 return err;
337 }
338
339 schedule_timeout(timeout);
340
341 remove_wait_queue(&hdev->req_wait_q, &wait);
342
343 if (signal_pending(current))
344 return -EINTR;
345
346 switch (hdev->req_status) {
347 case HCI_REQ_DONE:
348 err = -bt_to_errno(hdev->req_result);
349 break;
350
351 case HCI_REQ_CANCELED:
352 err = -hdev->req_result;
353 break;
354
355 default:
356 err = -ETIMEDOUT;
357 break;
358 }
359
360 hdev->req_status = hdev->req_result = 0;
361
362 BT_DBG("%s end: err %d", hdev->name, err);
363
364 return err;
365 }
366
367 static int hci_req_sync(struct hci_dev *hdev,
368 void (*req)(struct hci_request *req,
369 unsigned long opt),
370 unsigned long opt, __u32 timeout)
371 {
372 int ret;
373
374 /* Serialize all requests */
375 hci_req_lock(hdev);
376 /* check the state after obtaining the lock to protect the HCI_UP
377 * against any races from hci_dev_do_close when the controller
378 * gets removed.
379 */
380 if (test_bit(HCI_UP, &hdev->flags))
381 ret = __hci_req_sync(hdev, req, opt, timeout);
382 else
383 ret = -ENETDOWN;
384
385 hci_req_unlock(hdev);
386
387 return ret;
388 }
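
/* Sketch of the calling convention, using one of the request builders
 * defined later in this file (the values shown are just an example):
 *
 *   err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                      HCI_INIT_TIMEOUT);
 *
 * The builder queues the HCI command(s) into the request and the caller
 * sleeps until hci_req_sync_complete() runs or the timeout expires.
 */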
389
390 static void hci_reset_req(struct hci_request *req, unsigned long opt)
391 {
392 BT_DBG("%s %ld", req->hdev->name, opt);
393
394 /* Reset device */
395 set_bit(HCI_RESET, &req->hdev->flags);
396 hci_req_add(req, HCI_OP_RESET, 0, NULL);
397 }
398
399 static void bredr_init(struct hci_request *req)
400 {
401 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
402
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
405
406 /* Read Local Version */
407 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
408
409 /* Read BD Address */
410 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
411 }
412
413 static void amp_init1(struct hci_request *req)
414 {
415 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
416
417 /* Read Local Version */
418 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
419
420 /* Read Local Supported Commands */
421 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
422
423 /* Read Local AMP Info */
424 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
425
426 /* Read Data Blk size */
427 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
428
429 /* Read Flow Control Mode */
430 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
431
432 /* Read Location Data */
433 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
434 }
435
436 static void amp_init2(struct hci_request *req)
437 {
438 /* Read Local Supported Features. Not all AMP controllers
439 * support this so it's placed conditionally in the second
440 * stage init.
441 */
442 if (req->hdev->commands[14] & 0x20)
443 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
444 }
445
446 static void hci_init1_req(struct hci_request *req, unsigned long opt)
447 {
448 struct hci_dev *hdev = req->hdev;
449
450 BT_DBG("%s %ld", hdev->name, opt);
451
452 /* Reset */
453 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
454 hci_reset_req(req, 0);
455
456 switch (hdev->dev_type) {
457 case HCI_BREDR:
458 bredr_init(req);
459 break;
460
461 case HCI_AMP:
462 amp_init1(req);
463 break;
464
465 default:
466 BT_ERR("Unknown device type %d", hdev->dev_type);
467 break;
468 }
469 }
470
471 static void bredr_setup(struct hci_request *req)
472 {
473 __le16 param;
474 __u8 flt_type;
475
476 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
477 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
478
479 /* Read Class of Device */
480 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
481
482 /* Read Local Name */
483 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
484
485 /* Read Voice Setting */
486 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
487
488 /* Read Number of Supported IAC */
489 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
490
491 /* Read Current IAC LAP */
492 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
493
494 /* Clear Event Filters */
495 flt_type = HCI_FLT_CLEAR_ALL;
496 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
497
498 /* Connection accept timeout ~20 secs */
499 param = cpu_to_le16(0x7d00);
500 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
501 }
502
503 static void le_setup(struct hci_request *req)
504 {
505 struct hci_dev *hdev = req->hdev;
506
507 /* Read LE Buffer Size */
508 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
509
510 /* Read LE Local Supported Features */
511 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
512
513 /* Read LE Supported States */
514 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
515
516 /* LE-only controllers have LE implicitly enabled */
517 if (!lmp_bredr_capable(hdev))
518 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
519 }
520
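/* Note on the bit manipulation below: octet n, bit m of events[] maps to
 * event mask bit 8 * n + m, which for the events listed enables the HCI
 * event with code 8 * n + m + 1 (e.g. events[1] |= 0x20 is bit 13 and
 * enables Command Complete, event code 0x0e).
 */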
521 static void hci_setup_event_mask(struct hci_request *req)
522 {
523 struct hci_dev *hdev = req->hdev;
524
525 /* The second byte is 0xff instead of 0x9f (two reserved bits
526 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
527 * command otherwise.
528 */
529 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
530
531 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
532 * any event mask for pre 1.2 devices.
533 */
534 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
535 return;
536
537 if (lmp_bredr_capable(hdev)) {
538 events[4] |= 0x01; /* Flow Specification Complete */
539 events[4] |= 0x02; /* Inquiry Result with RSSI */
540 events[4] |= 0x04; /* Read Remote Extended Features Complete */
541 events[5] |= 0x08; /* Synchronous Connection Complete */
542 events[5] |= 0x10; /* Synchronous Connection Changed */
543 } else {
544 /* Use a different default for LE-only devices */
545 memset(events, 0, sizeof(events));
546 events[0] |= 0x10; /* Disconnection Complete */
547 events[1] |= 0x08; /* Read Remote Version Information Complete */
548 events[1] |= 0x20; /* Command Complete */
549 events[1] |= 0x40; /* Command Status */
550 events[1] |= 0x80; /* Hardware Error */
551 events[2] |= 0x04; /* Number of Completed Packets */
552 events[3] |= 0x02; /* Data Buffer Overflow */
553
554 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
555 events[0] |= 0x80; /* Encryption Change */
556 events[5] |= 0x80; /* Encryption Key Refresh Complete */
557 }
558 }
559
560 if (lmp_inq_rssi_capable(hdev))
561 events[4] |= 0x02; /* Inquiry Result with RSSI */
562
563 if (lmp_sniffsubr_capable(hdev))
564 events[5] |= 0x20; /* Sniff Subrating */
565
566 if (lmp_pause_enc_capable(hdev))
567 events[5] |= 0x80; /* Encryption Key Refresh Complete */
568
569 if (lmp_ext_inq_capable(hdev))
570 events[5] |= 0x40; /* Extended Inquiry Result */
571
572 if (lmp_no_flush_capable(hdev))
573 events[7] |= 0x01; /* Enhanced Flush Complete */
574
575 if (lmp_lsto_capable(hdev))
576 events[6] |= 0x80; /* Link Supervision Timeout Changed */
577
578 if (lmp_ssp_capable(hdev)) {
579 events[6] |= 0x01; /* IO Capability Request */
580 events[6] |= 0x02; /* IO Capability Response */
581 events[6] |= 0x04; /* User Confirmation Request */
582 events[6] |= 0x08; /* User Passkey Request */
583 events[6] |= 0x10; /* Remote OOB Data Request */
584 events[6] |= 0x20; /* Simple Pairing Complete */
585 events[7] |= 0x04; /* User Passkey Notification */
586 events[7] |= 0x08; /* Keypress Notification */
587 events[7] |= 0x10; /* Remote Host Supported
588 * Features Notification
589 */
590 }
591
592 if (lmp_le_capable(hdev))
593 events[7] |= 0x20; /* LE Meta-Event */
594
595 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
596 }
597
598 static void hci_init2_req(struct hci_request *req, unsigned long opt)
599 {
600 struct hci_dev *hdev = req->hdev;
601
602 if (hdev->dev_type == HCI_AMP)
603 return amp_init2(req);
604
605 if (lmp_bredr_capable(hdev))
606 bredr_setup(req);
607 else
608 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
609
610 if (lmp_le_capable(hdev))
611 le_setup(req);
612
613 /* All Bluetooth 1.2 and later controllers should support the
614 * HCI command for reading the local supported commands.
615 *
616 * Unfortunately some controllers indicate Bluetooth 1.2 support,
617 * but do not have support for this command. If that is the case,
618 * the driver can quirk the behavior and skip reading the local
619 * supported commands.
620 */
621 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
622 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
623 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
624
625 if (lmp_ssp_capable(hdev)) {
626 /* When SSP is available, then the host features page
627 * should also be available as well. However some
628 * controllers list the max_page as 0 as long as SSP
629 * has not been enabled. To achieve proper debugging
630 * output, force the minimum max_page to 1 at least.
631 */
632 hdev->max_page = 0x01;
633
634 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
635 u8 mode = 0x01;
636
637 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
638 sizeof(mode), &mode);
639 } else {
640 struct hci_cp_write_eir cp;
641
642 memset(hdev->eir, 0, sizeof(hdev->eir));
643 memset(&cp, 0, sizeof(cp));
644
645 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
646 }
647 }
648
649 if (lmp_inq_rssi_capable(hdev) ||
650 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
651 u8 mode;
652
653 /* If Extended Inquiry Result events are supported, then
654 * they are clearly preferred over Inquiry Result with RSSI
655 * events.
656 */
657 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
658
659 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
660 }
661
662 if (lmp_inq_tx_pwr_capable(hdev))
663 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
664
665 if (lmp_ext_feat_capable(hdev)) {
666 struct hci_cp_read_local_ext_features cp;
667
668 cp.page = 0x01;
669 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
670 sizeof(cp), &cp);
671 }
672
673 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
674 u8 enable = 1;
675 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
676 &enable);
677 }
678 }
679
680 static void hci_setup_link_policy(struct hci_request *req)
681 {
682 struct hci_dev *hdev = req->hdev;
683 struct hci_cp_write_def_link_policy cp;
684 u16 link_policy = 0;
685
686 if (lmp_rswitch_capable(hdev))
687 link_policy |= HCI_LP_RSWITCH;
688 if (lmp_hold_capable(hdev))
689 link_policy |= HCI_LP_HOLD;
690 if (lmp_sniff_capable(hdev))
691 link_policy |= HCI_LP_SNIFF;
692 if (lmp_park_capable(hdev))
693 link_policy |= HCI_LP_PARK;
694
695 cp.policy = cpu_to_le16(link_policy);
696 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
697 }
698
699 static void hci_set_le_support(struct hci_request *req)
700 {
701 struct hci_dev *hdev = req->hdev;
702 struct hci_cp_write_le_host_supported cp;
703
704 /* LE-only devices do not support explicit enablement */
705 if (!lmp_bredr_capable(hdev))
706 return;
707
708 memset(&cp, 0, sizeof(cp));
709
710 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
711 cp.le = 0x01;
712 cp.simul = 0x00;
713 }
714
715 if (cp.le != lmp_host_le_capable(hdev))
716 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
717 &cp);
718 }
719
720 static void hci_set_event_mask_page_2(struct hci_request *req)
721 {
722 struct hci_dev *hdev = req->hdev;
723 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
724 bool changed = false;
725
726 /* If Connectionless Slave Broadcast master role is supported
727 * enable all necessary events for it.
728 */
729 if (lmp_csb_master_capable(hdev)) {
730 events[1] |= 0x40; /* Triggered Clock Capture */
731 events[1] |= 0x80; /* Synchronization Train Complete */
732 events[2] |= 0x10; /* Slave Page Response Timeout */
733 events[2] |= 0x20; /* CSB Channel Map Change */
734 changed = true;
735 }
736
737 /* If Connectionless Slave Broadcast slave role is supported
738 * enable all necessary events for it.
739 */
740 if (lmp_csb_slave_capable(hdev)) {
741 events[2] |= 0x01; /* Synchronization Train Received */
742 events[2] |= 0x02; /* CSB Receive */
743 events[2] |= 0x04; /* CSB Timeout */
744 events[2] |= 0x08; /* Truncated Page Complete */
745 changed = true;
746 }
747
748 /* Enable Authenticated Payload Timeout Expired event if supported */
749 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
750 events[2] |= 0x80;
751 changed = true;
752 }
753
754 /* Some Broadcom based controllers indicate support for Set Event
755 * Mask Page 2 command, but then actually do not support it. Since
756 * the default value is all bits set to zero, the command is only
757 * required if the event mask has to be changed. In case no change
758 * to the event mask is needed, skip this command.
759 */
760 if (changed)
761 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
762 sizeof(events), events);
763 }
764
765 static void hci_init3_req(struct hci_request *req, unsigned long opt)
766 {
767 struct hci_dev *hdev = req->hdev;
768 u8 p;
769
770 hci_setup_event_mask(req);
771
772 if (hdev->commands[6] & 0x20 &&
773 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
774 struct hci_cp_read_stored_link_key cp;
775
776 bacpy(&cp.bdaddr, BDADDR_ANY);
777 cp.read_all = 0x01;
778 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
779 }
780
781 if (hdev->commands[5] & 0x10)
782 hci_setup_link_policy(req);
783
784 if (hdev->commands[8] & 0x01)
785 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
786
787 /* Some older Broadcom based Bluetooth 1.2 controllers do not
788 * support the Read Page Scan Type command. Check support for
789 * this command in the bit mask of supported commands.
790 */
791 if (hdev->commands[13] & 0x01)
792 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
793
794 if (lmp_le_capable(hdev)) {
795 u8 events[8];
796
797 memset(events, 0, sizeof(events));
798 events[0] = 0x0f;
799
800 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
801 events[0] |= 0x10; /* LE Long Term Key Request */
802
803 /* If controller supports the Connection Parameters Request
804 * Link Layer Procedure, enable the corresponding event.
805 */
806 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
807 events[0] |= 0x20; /* LE Remote Connection
808 * Parameter Request
809 */
810
811 /* If the controller supports the Data Length Extension
812 * feature, enable the corresponding event.
813 */
814 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
815 events[0] |= 0x40; /* LE Data Length Change */
816
817 /* If the controller supports Extended Scanner Filter
818 * Policies, enable the corresponding event.
819 */
820 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
821 events[1] |= 0x04; /* LE Direct Advertising
822 * Report
823 */
824
825 /* If the controller supports the LE Read Local P-256
826 * Public Key command, enable the corresponding event.
827 */
828 if (hdev->commands[34] & 0x02)
829 events[0] |= 0x80; /* LE Read Local P-256
830 * Public Key Complete
831 */
832
833 /* If the controller supports the LE Generate DHKey
834 * command, enable the corresponding event.
835 */
836 if (hdev->commands[34] & 0x04)
837 events[1] |= 0x01; /* LE Generate DHKey Complete */
838
839 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
840 events);
841
842 if (hdev->commands[25] & 0x40) {
843 /* Read LE Advertising Channel TX Power */
844 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
845 }
846
847 if (hdev->commands[26] & 0x40) {
848 /* Read LE White List Size */
849 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
850 0, NULL);
851 }
852
853 if (hdev->commands[26] & 0x80) {
854 /* Clear LE White List */
855 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
856 }
857
858 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
859 /* Read LE Maximum Data Length */
860 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
861
862 /* Read LE Suggested Default Data Length */
863 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
864 }
865
866 hci_set_le_support(req);
867 }
868
869 /* Read features beyond page 1 if available */
870 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
871 struct hci_cp_read_local_ext_features cp;
872
873 cp.page = p;
874 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
875 sizeof(cp), &cp);
876 }
877 }
878
879 static void hci_init4_req(struct hci_request *req, unsigned long opt)
880 {
881 struct hci_dev *hdev = req->hdev;
882
883 /* Some Broadcom based Bluetooth controllers do not support the
884 * Delete Stored Link Key command. They are clearly indicating its
885 * absence in the bit mask of supported commands.
886 *
887 * Check the supported commands and only if the command is marked
888 * as supported send it. If not supported assume that the controller
889 * does not have actual support for stored link keys which makes this
890 * command redundant anyway.
891 *
892 * Some controllers indicate that they support deleting stored
893 * link keys, but they don't. The quirk lets a driver
894 * just disable this command.
895 */
896 if (hdev->commands[6] & 0x80 &&
897 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
898 struct hci_cp_delete_stored_link_key cp;
899
900 bacpy(&cp.bdaddr, BDADDR_ANY);
901 cp.delete_all = 0x01;
902 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
903 sizeof(cp), &cp);
904 }
905
906 /* Set event mask page 2 if the HCI command for it is supported */
907 if (hdev->commands[22] & 0x04)
908 hci_set_event_mask_page_2(req);
909
910 /* Read local codec list if the HCI command is supported */
911 if (hdev->commands[29] & 0x20)
912 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
913
914 /* Get MWS transport configuration if the HCI command is supported */
915 if (hdev->commands[30] & 0x08)
916 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
917
918 /* Check for Synchronization Train support */
919 if (lmp_sync_train_capable(hdev))
920 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
921
922 /* Enable Secure Connections if supported and configured */
923 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
924 bredr_sc_enabled(hdev)) {
925 u8 support = 0x01;
926
927 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
928 sizeof(support), &support);
929 }
930 }
931
932 static int __hci_init(struct hci_dev *hdev)
933 {
934 int err;
935
936 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
937 if (err < 0)
938 return err;
939
940 if (hci_dev_test_flag(hdev, HCI_SETUP))
941 hci_debugfs_create_basic(hdev);
942
943 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
944 if (err < 0)
945 return err;
946
947 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
948 * BR/EDR/LE controllers. AMP controllers only need the
949 * first two stages of init.
950 */
951 if (hdev->dev_type != HCI_BREDR)
952 return 0;
953
954 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
955 if (err < 0)
956 return err;
957
958 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
959 if (err < 0)
960 return err;
961
962 /* This function is only called when the controller is actually in
963 * configured state. When the controller is marked as unconfigured,
964 * this initialization procedure is not run.
965 *
966 * It means that it is possible that a controller runs through its
967 * setup phase and then discovers missing settings. If that is the
968 * case, then this function will not be called. It then will only
969 * be called during the config phase.
970 *
971 * So only when in setup phase or config phase, create the debugfs
972 * entries and register the SMP channels.
973 */
974 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
975 !hci_dev_test_flag(hdev, HCI_CONFIG))
976 return 0;
977
978 hci_debugfs_create_common(hdev);
979
980 if (lmp_bredr_capable(hdev))
981 hci_debugfs_create_bredr(hdev);
982
983 if (lmp_le_capable(hdev))
984 hci_debugfs_create_le(hdev);
985
986 return 0;
987 }
988
989 static void hci_init0_req(struct hci_request *req, unsigned long opt)
990 {
991 struct hci_dev *hdev = req->hdev;
992
993 BT_DBG("%s %ld", hdev->name, opt);
994
995 /* Reset */
996 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
997 hci_reset_req(req, 0);
998
999 /* Read Local Version */
1000 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1001
1002 /* Read BD Address */
1003 if (hdev->set_bdaddr)
1004 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1005 }
1006
1007 static int __hci_unconf_init(struct hci_dev *hdev)
1008 {
1009 int err;
1010
1011 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1012 return 0;
1013
1014 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1015 if (err < 0)
1016 return err;
1017
1018 if (hci_dev_test_flag(hdev, HCI_SETUP))
1019 hci_debugfs_create_basic(hdev);
1020
1021 return 0;
1022 }
1023
1024 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1025 {
1026 __u8 scan = opt;
1027
1028 BT_DBG("%s %x", req->hdev->name, scan);
1029
1030 /* Inquiry and Page scans */
1031 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1032 }
1033
1034 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1035 {
1036 __u8 auth = opt;
1037
1038 BT_DBG("%s %x", req->hdev->name, auth);
1039
1040 /* Authentication */
1041 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1042 }
1043
1044 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1045 {
1046 __u8 encrypt = opt;
1047
1048 BT_DBG("%s %x", req->hdev->name, encrypt);
1049
1050 /* Encryption */
1051 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1052 }
1053
1054 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1055 {
1056 __le16 policy = cpu_to_le16(opt);
1057
1058 BT_DBG("%s %x", req->hdev->name, policy);
1059
1060 /* Default link policy */
1061 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1062 }
1063
1064 /* Get HCI device by index.
1065 * Device is held on return. */
1066 struct hci_dev *hci_dev_get(int index)
1067 {
1068 struct hci_dev *hdev = NULL, *d;
1069
1070 BT_DBG("%d", index);
1071
1072 if (index < 0)
1073 return NULL;
1074
1075 read_lock(&hci_dev_list_lock);
1076 list_for_each_entry(d, &hci_dev_list, list) {
1077 if (d->id == index) {
1078 hdev = hci_dev_hold(d);
1079 break;
1080 }
1081 }
1082 read_unlock(&hci_dev_list_lock);
1083 return hdev;
1084 }
1085
1086 /* ---- Inquiry support ---- */
1087
1088 bool hci_discovery_active(struct hci_dev *hdev)
1089 {
1090 struct discovery_state *discov = &hdev->discovery;
1091
1092 switch (discov->state) {
1093 case DISCOVERY_FINDING:
1094 case DISCOVERY_RESOLVING:
1095 return true;
1096
1097 default:
1098 return false;
1099 }
1100 }
1101
1102 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1103 {
1104 int old_state = hdev->discovery.state;
1105
1106 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1107
1108 if (old_state == state)
1109 return;
1110
1111 hdev->discovery.state = state;
1112
1113 switch (state) {
1114 case DISCOVERY_STOPPED:
1115 hci_update_background_scan(hdev);
1116
1117 if (old_state != DISCOVERY_STARTING)
1118 mgmt_discovering(hdev, 0);
1119 break;
1120 case DISCOVERY_STARTING:
1121 break;
1122 case DISCOVERY_FINDING:
1123 mgmt_discovering(hdev, 1);
1124 break;
1125 case DISCOVERY_RESOLVING:
1126 break;
1127 case DISCOVERY_STOPPING:
1128 break;
1129 }
1130 }
1131
1132 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1133 {
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *p, *n;
1136
1137 list_for_each_entry_safe(p, n, &cache->all, all) {
1138 list_del(&p->all);
1139 kfree(p);
1140 }
1141
1142 INIT_LIST_HEAD(&cache->unknown);
1143 INIT_LIST_HEAD(&cache->resolve);
1144 }
1145
1146 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1147 bdaddr_t *bdaddr)
1148 {
1149 struct discovery_state *cache = &hdev->discovery;
1150 struct inquiry_entry *e;
1151
1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
1153
1154 list_for_each_entry(e, &cache->all, all) {
1155 if (!bacmp(&e->data.bdaddr, bdaddr))
1156 return e;
1157 }
1158
1159 return NULL;
1160 }
1161
1162 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1163 bdaddr_t *bdaddr)
1164 {
1165 struct discovery_state *cache = &hdev->discovery;
1166 struct inquiry_entry *e;
1167
1168 BT_DBG("cache %p, %pMR", cache, bdaddr);
1169
1170 list_for_each_entry(e, &cache->unknown, list) {
1171 if (!bacmp(&e->data.bdaddr, bdaddr))
1172 return e;
1173 }
1174
1175 return NULL;
1176 }
1177
1178 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1179 bdaddr_t *bdaddr,
1180 int state)
1181 {
1182 struct discovery_state *cache = &hdev->discovery;
1183 struct inquiry_entry *e;
1184
1185 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1186
1187 list_for_each_entry(e, &cache->resolve, list) {
1188 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1189 return e;
1190 if (!bacmp(&e->data.bdaddr, bdaddr))
1191 return e;
1192 }
1193
1194 return NULL;
1195 }
1196
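/* Re-insert an entry into the resolve list, keeping the list roughly sorted
 * by signal strength (smallest |RSSI|, i.e. strongest signal, first); entries
 * whose name resolution is already in flight are skipped over rather than
 * used as an insertion point.
 */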
1197 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1198 struct inquiry_entry *ie)
1199 {
1200 struct discovery_state *cache = &hdev->discovery;
1201 struct list_head *pos = &cache->resolve;
1202 struct inquiry_entry *p;
1203
1204 list_del(&ie->list);
1205
1206 list_for_each_entry(p, &cache->resolve, list) {
1207 if (p->name_state != NAME_PENDING &&
1208 abs(p->data.rssi) >= abs(ie->data.rssi))
1209 break;
1210 pos = &p->list;
1211 }
1212
1213 list_add(&ie->list, pos);
1214 }
1215
1216 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1217 bool name_known)
1218 {
1219 struct discovery_state *cache = &hdev->discovery;
1220 struct inquiry_entry *ie;
1221 u32 flags = 0;
1222
1223 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1224
1225 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1226
1227 if (!data->ssp_mode)
1228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1229
1230 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1231 if (ie) {
1232 if (!ie->data.ssp_mode)
1233 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1234
1235 if (ie->name_state == NAME_NEEDED &&
1236 data->rssi != ie->data.rssi) {
1237 ie->data.rssi = data->rssi;
1238 hci_inquiry_cache_update_resolve(hdev, ie);
1239 }
1240
1241 goto update;
1242 }
1243
1244 /* Entry not in the cache. Add new one. */
1245 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1246 if (!ie) {
1247 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1248 goto done;
1249 }
1250
1251 list_add(&ie->all, &cache->all);
1252
1253 if (name_known) {
1254 ie->name_state = NAME_KNOWN;
1255 } else {
1256 ie->name_state = NAME_NOT_KNOWN;
1257 list_add(&ie->list, &cache->unknown);
1258 }
1259
1260 update:
1261 if (name_known && ie->name_state != NAME_KNOWN &&
1262 ie->name_state != NAME_PENDING) {
1263 ie->name_state = NAME_KNOWN;
1264 list_del(&ie->list);
1265 }
1266
1267 memcpy(&ie->data, data, sizeof(*data));
1268 ie->timestamp = jiffies;
1269 cache->timestamp = jiffies;
1270
1271 if (ie->name_state == NAME_NOT_KNOWN)
1272 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1273
1274 done:
1275 return flags;
1276 }
1277
1278 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1279 {
1280 struct discovery_state *cache = &hdev->discovery;
1281 struct inquiry_info *info = (struct inquiry_info *) buf;
1282 struct inquiry_entry *e;
1283 int copied = 0;
1284
1285 list_for_each_entry(e, &cache->all, all) {
1286 struct inquiry_data *data = &e->data;
1287
1288 if (copied >= num)
1289 break;
1290
1291 bacpy(&info->bdaddr, &data->bdaddr);
1292 info->pscan_rep_mode = data->pscan_rep_mode;
1293 info->pscan_period_mode = data->pscan_period_mode;
1294 info->pscan_mode = data->pscan_mode;
1295 memcpy(info->dev_class, data->dev_class, 3);
1296 info->clock_offset = data->clock_offset;
1297
1298 info++;
1299 copied++;
1300 }
1301
1302 BT_DBG("cache %p, copied %d", cache, copied);
1303 return copied;
1304 }
1305
1306 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1307 {
1308 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1309 struct hci_dev *hdev = req->hdev;
1310 struct hci_cp_inquiry cp;
1311
1312 BT_DBG("%s", hdev->name);
1313
1314 if (test_bit(HCI_INQUIRY, &hdev->flags))
1315 return;
1316
1317 /* Start Inquiry */
1318 memcpy(&cp.lap, &ir->lap, 3);
1319 cp.length = ir->length;
1320 cp.num_rsp = ir->num_rsp;
1321 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1322 }
1323
1324 int hci_inquiry(void __user *arg)
1325 {
1326 __u8 __user *ptr = arg;
1327 struct hci_inquiry_req ir;
1328 struct hci_dev *hdev;
1329 int err = 0, do_inquiry = 0, max_rsp;
1330 long timeo;
1331 __u8 *buf;
1332
1333 if (copy_from_user(&ir, ptr, sizeof(ir)))
1334 return -EFAULT;
1335
1336 hdev = hci_dev_get(ir.dev_id);
1337 if (!hdev)
1338 return -ENODEV;
1339
1340 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1341 err = -EBUSY;
1342 goto done;
1343 }
1344
1345 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1346 err = -EOPNOTSUPP;
1347 goto done;
1348 }
1349
1350 if (hdev->dev_type != HCI_BREDR) {
1351 err = -EOPNOTSUPP;
1352 goto done;
1353 }
1354
1355 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1356 err = -EOPNOTSUPP;
1357 goto done;
1358 }
1359
1360 /* Restrict maximum inquiry length to 60 seconds */
1361 if (ir.length > 60) {
1362 err = -EINVAL;
1363 goto done;
1364 }
1365
1366 hci_dev_lock(hdev);
1367 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1368 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1369 hci_inquiry_cache_flush(hdev);
1370 do_inquiry = 1;
1371 }
1372 hci_dev_unlock(hdev);
1373
1374 timeo = ir.length * msecs_to_jiffies(2000);
1375
1376 if (do_inquiry) {
1377 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1378 timeo);
1379 if (err < 0)
1380 goto done;
1381
1382 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1383 * cleared). If it is interrupted by a signal, return -EINTR.
1384 */
1385 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1386 TASK_INTERRUPTIBLE)) {
1387 err = -EINTR;
1388 goto done;
1389 }
1390 }
1391
1392 /* For an unlimited number of responses we will use a buffer with
1393 * 255 entries
1394 */
1395 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1396
1397 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1398 * then copy it to user space.
1399 */
1400 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1401 if (!buf) {
1402 err = -ENOMEM;
1403 goto done;
1404 }
1405
1406 hci_dev_lock(hdev);
1407 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1408 hci_dev_unlock(hdev);
1409
1410 BT_DBG("num_rsp %d", ir.num_rsp);
1411
1412 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1413 ptr += sizeof(ir);
1414 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1415 ir.num_rsp))
1416 err = -EFAULT;
1417 } else
1418 err = -EFAULT;
1419
1420 kfree(buf);
1421
1422 done:
1423 hci_dev_put(hdev);
1424 return err;
1425 }
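
/* Illustrative userspace counterpart of the ioctl served above (hypothetical
 * snippet, not part of the kernel; buffer sizing mirrors the 255-entry cap):
 *
 *   struct hci_inquiry_req *ir;
 *
 *   ir = malloc(sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *   ir->dev_id = 0;
 *   ir->flags  = IREQ_CACHE_FLUSH;
 *   ir->lap[0] = 0x33;  ir->lap[1] = 0x8b;  ir->lap[2] = 0x9e;   (GIAC)
 *   ir->length = 8;     ir->num_rsp = 255;
 *   ioctl(hci_sock, HCIINQUIRY, ir);
 */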
1426
1427 static int hci_dev_do_open(struct hci_dev *hdev)
1428 {
1429 int ret = 0;
1430
1431 BT_DBG("%s %p", hdev->name, hdev);
1432
1433 hci_req_lock(hdev);
1434
1435 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1436 ret = -ENODEV;
1437 goto done;
1438 }
1439
1440 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1441 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1442 /* Check for rfkill but allow the HCI setup stage to
1443 * proceed (which in itself doesn't cause any RF activity).
1444 */
1445 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1446 ret = -ERFKILL;
1447 goto done;
1448 }
1449
1450 /* Check for valid public address or a configured static
1451 * random address, but let the HCI setup proceed to
1452 * be able to determine if there is a public address
1453 * or not.
1454 *
1455 * In case of user channel usage, it is not important
1456 * if a public address or static random address is
1457 * available.
1458 *
1459 * This check is only valid for BR/EDR controllers
1460 * since AMP controllers do not have an address.
1461 */
1462 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1463 hdev->dev_type == HCI_BREDR &&
1464 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1465 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1466 ret = -EADDRNOTAVAIL;
1467 goto done;
1468 }
1469 }
1470
1471 if (test_bit(HCI_UP, &hdev->flags)) {
1472 ret = -EALREADY;
1473 goto done;
1474 }
1475
1476 if (hdev->open(hdev)) {
1477 ret = -EIO;
1478 goto done;
1479 }
1480
1481 set_bit(HCI_RUNNING, &hdev->flags);
1482 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1483
1484 atomic_set(&hdev->cmd_cnt, 1);
1485 set_bit(HCI_INIT, &hdev->flags);
1486
1487 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1488 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1489
1490 if (hdev->setup)
1491 ret = hdev->setup(hdev);
1492
1493 /* The transport driver can set these quirks before
1494 * creating the HCI device or in its setup callback.
1495 *
1496 * In case any of them is set, the controller has to
1497 * start up as unconfigured.
1498 */
1499 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1500 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1501 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1502
1503 /* For an unconfigured controller it is required to
1504 * read at least the version information provided by
1505 * the Read Local Version Information command.
1506 *
1507 * If the set_bdaddr driver callback is provided, then
1508 * also the original Bluetooth public device address
1509 * will be read using the Read BD Address command.
1510 */
1511 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1512 ret = __hci_unconf_init(hdev);
1513 }
1514
1515 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1516 /* If public address change is configured, ensure that
1517 * the address gets programmed. If the driver does not
1518 * support changing the public address, fail the power
1519 * on procedure.
1520 */
1521 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1522 hdev->set_bdaddr)
1523 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1524 else
1525 ret = -EADDRNOTAVAIL;
1526 }
1527
1528 if (!ret) {
1529 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1530 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1531 ret = __hci_init(hdev);
1532 if (!ret && hdev->post_init)
1533 ret = hdev->post_init(hdev);
1534 }
1535 }
1536
1537 /* If the HCI Reset command is clearing all diagnostic settings,
1538 * then they need to be reprogrammed after the init procedure
1539 * has completed.
1540 */
1541 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1542 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1543 ret = hdev->set_diag(hdev, true);
1544
1545 clear_bit(HCI_INIT, &hdev->flags);
1546
1547 if (!ret) {
1548 hci_dev_hold(hdev);
1549 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1550 set_bit(HCI_UP, &hdev->flags);
1551 hci_sock_dev_event(hdev, HCI_DEV_UP);
1552 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1553 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1554 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1555 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1556 hdev->dev_type == HCI_BREDR) {
1557 hci_dev_lock(hdev);
1558 mgmt_powered(hdev, 1);
1559 hci_dev_unlock(hdev);
1560 }
1561 } else {
1562 /* Init failed, cleanup */
1563 flush_work(&hdev->tx_work);
1564
1565 /* Since hci_rx_work() can wake up new cmd_work it should
1566 * be flushed first to avoid an unexpected call of
1567 * hci_cmd_work()
1568 */
1569 flush_work(&hdev->rx_work);
1570 flush_work(&hdev->cmd_work);
1571
1572 skb_queue_purge(&hdev->cmd_q);
1573 skb_queue_purge(&hdev->rx_q);
1574
1575 if (hdev->flush)
1576 hdev->flush(hdev);
1577
1578 if (hdev->sent_cmd) {
1579 kfree_skb(hdev->sent_cmd);
1580 hdev->sent_cmd = NULL;
1581 }
1582
1583 clear_bit(HCI_RUNNING, &hdev->flags);
1584 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1585
1586 hdev->close(hdev);
1587 hdev->flags &= BIT(HCI_RAW);
1588 }
1589
1590 done:
1591 hci_req_unlock(hdev);
1592 return ret;
1593 }
1594
1595 /* ---- HCI ioctl helpers ---- */
1596
1597 int hci_dev_open(__u16 dev)
1598 {
1599 struct hci_dev *hdev;
1600 int err;
1601
1602 hdev = hci_dev_get(dev);
1603 if (!hdev)
1604 return -ENODEV;
1605
1606 /* Devices that are marked as unconfigured can only be powered
1607 * up as user channel. Trying to bring them up as normal devices
1608 * will result in a failure. Only user channel operation is
1609 * possible.
1610 *
1611 * When this function is called for a user channel, the flag
1612 * HCI_USER_CHANNEL will be set first before attempting to
1613 * open the device.
1614 */
1615 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1616 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1617 err = -EOPNOTSUPP;
1618 goto done;
1619 }
1620
1621 /* We need to ensure that no other power on/off work is pending
1622 * before proceeding to call hci_dev_do_open. This is
1623 * particularly important if the setup procedure has not yet
1624 * completed.
1625 */
1626 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1627 cancel_delayed_work(&hdev->power_off);
1628
1629 /* After this call it is guaranteed that the setup procedure
1630 * has finished. This means that error conditions like RFKILL
1631 * or no valid public or static random address apply.
1632 */
1633 flush_workqueue(hdev->req_workqueue);
1634
1635 /* For controllers not using the management interface and that
1636 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1637 * so that pairing works for them. Once the management interface
1638 * is in use this bit will be cleared again and userspace has
1639 * to explicitly enable it.
1640 */
1641 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1642 !hci_dev_test_flag(hdev, HCI_MGMT))
1643 hci_dev_set_flag(hdev, HCI_BONDABLE);
1644
1645 err = hci_dev_do_open(hdev);
1646
1647 done:
1648 hci_dev_put(hdev);
1649 return err;
1650 }
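
/* hci_dev_open() backs the HCIDEVUP ioctl. A minimal, hypothetical user
 * space caller (error handling omitted):
 *
 *   int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *   ioctl(ctl, HCIDEVUP, 0);         // bring up hci0
 */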
1651
1652 /* This function requires the caller holds hdev->lock */
1653 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1654 {
1655 struct hci_conn_params *p;
1656
1657 list_for_each_entry(p, &hdev->le_conn_params, list) {
1658 if (p->conn) {
1659 hci_conn_drop(p->conn);
1660 hci_conn_put(p->conn);
1661 p->conn = NULL;
1662 }
1663 list_del_init(&p->action);
1664 }
1665
1666 BT_DBG("All LE pending actions cleared");
1667 }
1668
1669 int hci_dev_do_close(struct hci_dev *hdev)
1670 {
1671 bool auto_off;
1672
1673 BT_DBG("%s %p", hdev->name, hdev);
1674
1675 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1676 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1677 test_bit(HCI_UP, &hdev->flags)) {
1678 /* Execute vendor specific shutdown routine */
1679 if (hdev->shutdown)
1680 hdev->shutdown(hdev);
1681 }
1682
1683 cancel_delayed_work(&hdev->power_off);
1684
1685 hci_req_cancel(hdev, ENODEV);
1686 hci_req_lock(hdev);
1687
1696 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1697 cancel_delayed_work_sync(&hdev->cmd_timer);
1698 hci_req_unlock(hdev);
1699 return 0;
1700 }
1701
1702 /* Flush RX and TX works */
1703 flush_work(&hdev->tx_work);
1704 flush_work(&hdev->rx_work);
1705
1706 if (hdev->discov_timeout > 0) {
1707 cancel_delayed_work(&hdev->discov_off);
1708 hdev->discov_timeout = 0;
1709 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1710 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1711 }
1712
1713 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1714 cancel_delayed_work(&hdev->service_cache);
1715
1716 cancel_delayed_work_sync(&hdev->le_scan_disable);
1717 cancel_delayed_work_sync(&hdev->le_scan_restart);
1718
1719 if (hci_dev_test_flag(hdev, HCI_MGMT))
1720 cancel_delayed_work_sync(&hdev->rpa_expired);
1721
1722 if (hdev->adv_instance_timeout) {
1723 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1724 hdev->adv_instance_timeout = 0;
1725 }
1726
1727 /* Avoid potential lockdep warnings from the *_flush() calls by
1728 * ensuring the workqueue is empty up front.
1729 */
1730 drain_workqueue(hdev->workqueue);
1731
1732 hci_dev_lock(hdev);
1733
1734 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1735
1736 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1737
1738 if (!auto_off && hdev->dev_type == HCI_BREDR)
1739 mgmt_powered(hdev, 0);
1740
1741 hci_inquiry_cache_flush(hdev);
1742 hci_pend_le_actions_clear(hdev);
1743 hci_conn_hash_flush(hdev);
1744 hci_dev_unlock(hdev);
1745
1746 smp_unregister(hdev);
1747
1748 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1749
1750 if (hdev->flush)
1751 hdev->flush(hdev);
1752
1753 /* Reset device */
1754 skb_queue_purge(&hdev->cmd_q);
1755 atomic_set(&hdev->cmd_cnt, 1);
1756 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1757 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1758 set_bit(HCI_INIT, &hdev->flags);
1759 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1760 clear_bit(HCI_INIT, &hdev->flags);
1761 }
1762
1763 /* flush cmd work */
1764 flush_work(&hdev->cmd_work);
1765
1766 /* Drop queues */
1767 skb_queue_purge(&hdev->rx_q);
1768 skb_queue_purge(&hdev->cmd_q);
1769 skb_queue_purge(&hdev->raw_q);
1770
1771 /* Drop last sent command */
1772 if (hdev->sent_cmd) {
1773 cancel_delayed_work_sync(&hdev->cmd_timer);
1774 kfree_skb(hdev->sent_cmd);
1775 hdev->sent_cmd = NULL;
1776 }
1777
1778 clear_bit(HCI_RUNNING, &hdev->flags);
1779 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1780
1781 /* After this point our queues are empty
1782 * and no tasks are scheduled. */
1783 hdev->close(hdev);
1784
1785 /* Clear flags */
1786 hdev->flags &= BIT(HCI_RAW);
1787 hci_dev_clear_volatile_flags(hdev);
1788
1789 /* Controller radio is available but is currently powered down */
1790 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1791
1792 memset(hdev->eir, 0, sizeof(hdev->eir));
1793 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1794 bacpy(&hdev->random_addr, BDADDR_ANY);
1795
1796 hci_req_unlock(hdev);
1797
1798 hci_dev_put(hdev);
1799 return 0;
1800 }
1801
1802 int hci_dev_close(__u16 dev)
1803 {
1804 struct hci_dev *hdev;
1805 int err;
1806
1807 hdev = hci_dev_get(dev);
1808 if (!hdev)
1809 return -ENODEV;
1810
1811 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1812 err = -EBUSY;
1813 goto done;
1814 }
1815
1816 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1817 cancel_delayed_work(&hdev->power_off);
1818
1819 err = hci_dev_do_close(hdev);
1820
1821 done:
1822 hci_dev_put(hdev);
1823 return err;
1824 }
1825
1826 static int hci_dev_do_reset(struct hci_dev *hdev)
1827 {
1828 int ret;
1829
1830 BT_DBG("%s %p", hdev->name, hdev);
1831
1832 hci_req_lock(hdev);
1833
1834 /* Drop queues */
1835 skb_queue_purge(&hdev->rx_q);
1836 skb_queue_purge(&hdev->cmd_q);
1837
1838 /* Avoid potential lockdep warnings from the *_flush() calls by
1839 * ensuring the workqueue is empty up front.
1840 */
1841 drain_workqueue(hdev->workqueue);
1842
1843 hci_dev_lock(hdev);
1844 hci_inquiry_cache_flush(hdev);
1845 hci_conn_hash_flush(hdev);
1846 hci_dev_unlock(hdev);
1847
1848 if (hdev->flush)
1849 hdev->flush(hdev);
1850
1851 atomic_set(&hdev->cmd_cnt, 1);
1852 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1853
1854 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1855
1856 hci_req_unlock(hdev);
1857 return ret;
1858 }
1859
1860 int hci_dev_reset(__u16 dev)
1861 {
1862 struct hci_dev *hdev;
1863 int err;
1864
1865 hdev = hci_dev_get(dev);
1866 if (!hdev)
1867 return -ENODEV;
1868
1869 if (!test_bit(HCI_UP, &hdev->flags)) {
1870 err = -ENETDOWN;
1871 goto done;
1872 }
1873
1874 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1875 err = -EBUSY;
1876 goto done;
1877 }
1878
1879 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1880 err = -EOPNOTSUPP;
1881 goto done;
1882 }
1883
1884 err = hci_dev_do_reset(hdev);
1885
1886 done:
1887 hci_dev_put(hdev);
1888 return err;
1889 }
1890
1891 int hci_dev_reset_stat(__u16 dev)
1892 {
1893 struct hci_dev *hdev;
1894 int ret = 0;
1895
1896 hdev = hci_dev_get(dev);
1897 if (!hdev)
1898 return -ENODEV;
1899
1900 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1901 ret = -EBUSY;
1902 goto done;
1903 }
1904
1905 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1906 ret = -EOPNOTSUPP;
1907 goto done;
1908 }
1909
1910 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1911
1912 done:
1913 hci_dev_put(hdev);
1914 return ret;
1915 }
1916
1917 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1918 {
1919 bool conn_changed, discov_changed;
1920
1921 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1922
1923 if ((scan & SCAN_PAGE))
1924 conn_changed = !hci_dev_test_and_set_flag(hdev,
1925 HCI_CONNECTABLE);
1926 else
1927 conn_changed = hci_dev_test_and_clear_flag(hdev,
1928 HCI_CONNECTABLE);
1929
1930 if ((scan & SCAN_INQUIRY)) {
1931 discov_changed = !hci_dev_test_and_set_flag(hdev,
1932 HCI_DISCOVERABLE);
1933 } else {
1934 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1935 discov_changed = hci_dev_test_and_clear_flag(hdev,
1936 HCI_DISCOVERABLE);
1937 }
1938
1939 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1940 return;
1941
1942 if (conn_changed || discov_changed) {
1943 /* In case this was disabled through mgmt */
1944 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1945
1946 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1947 mgmt_update_adv_data(hdev);
1948
1949 mgmt_new_settings(hdev);
1950 }
1951 }
1952
1953 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1954 {
1955 struct hci_dev *hdev;
1956 struct hci_dev_req dr;
1957 int err = 0;
1958
1959 if (copy_from_user(&dr, arg, sizeof(dr)))
1960 return -EFAULT;
1961
1962 hdev = hci_dev_get(dr.dev_id);
1963 if (!hdev)
1964 return -ENODEV;
1965
1966 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1967 err = -EBUSY;
1968 goto done;
1969 }
1970
1971 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1972 err = -EOPNOTSUPP;
1973 goto done;
1974 }
1975
1976 if (hdev->dev_type != HCI_BREDR) {
1977 err = -EOPNOTSUPP;
1978 goto done;
1979 }
1980
1981 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1982 err = -EOPNOTSUPP;
1983 goto done;
1984 }
1985
1986 switch (cmd) {
1987 case HCISETAUTH:
1988 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1989 HCI_INIT_TIMEOUT);
1990 break;
1991
1992 case HCISETENCRYPT:
1993 if (!lmp_encrypt_capable(hdev)) {
1994 err = -EOPNOTSUPP;
1995 break;
1996 }
1997
1998 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1999 /* Auth must be enabled first */
2000 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2001 HCI_INIT_TIMEOUT);
2002 if (err)
2003 break;
2004 }
2005
2006 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2007 HCI_INIT_TIMEOUT);
2008 break;
2009
2010 case HCISETSCAN:
2011 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2012 HCI_INIT_TIMEOUT);
2013
2014 /* Ensure that the connectable and discoverable states
2015 * get correctly modified as this was a non-mgmt change.
2016 */
2017 if (!err)
2018 hci_update_scan_state(hdev, dr.dev_opt);
2019 break;
2020
2021 case HCISETLINKPOL:
2022 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2023 HCI_INIT_TIMEOUT);
2024 break;
2025
2026 case HCISETLINKMODE:
2027 hdev->link_mode = ((__u16) dr.dev_opt) &
2028 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2029 break;
2030
2031 case HCISETPTYPE:
2032 hdev->pkt_type = (__u16) dr.dev_opt;
2033 break;
2034
2035 case HCISETACLMTU:
2036 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2037 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2038 break;
2039
2040 case HCISETSCOMTU:
2041 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2042 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2043 break;
2044
2045 default:
2046 err = -EINVAL;
2047 break;
2048 }
2049
2050 done:
2051 hci_dev_put(hdev);
2052 return err;
2053 }
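
/* Example: a minimal user-space sketch of driving the HCISETSCAN ioctl
 * handled above. This is illustrative only and not part of the original
 * source; it assumes the BlueZ user-space headers, CAP_NET_ADMIN, and that
 * the target controller is hci0 (dev_id 0).
 *
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	static int set_piscan(void)
 *	{
 *		struct hci_dev_req dr;
 *		int ret, fd;
 *
 *		fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (fd < 0)
 *			return -1;
 *
 *		dr.dev_id  = 0;				// hci0 (assumed)
 *		dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable + discoverable
 *
 *		ret = ioctl(fd, HCISETSCAN, (unsigned long) &dr);
 *		close(fd);
 *		return ret;
 *	}
 */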
2054
2055 int hci_get_dev_list(void __user *arg)
2056 {
2057 struct hci_dev *hdev;
2058 struct hci_dev_list_req *dl;
2059 struct hci_dev_req *dr;
2060 int n = 0, size, err;
2061 __u16 dev_num;
2062
2063 if (get_user(dev_num, (__u16 __user *) arg))
2064 return -EFAULT;
2065
2066 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2067 return -EINVAL;
2068
2069 size = sizeof(*dl) + dev_num * sizeof(*dr);
2070
2071 dl = kzalloc(size, GFP_KERNEL);
2072 if (!dl)
2073 return -ENOMEM;
2074
2075 dr = dl->dev_req;
2076
2077 read_lock(&hci_dev_list_lock);
2078 list_for_each_entry(hdev, &hci_dev_list, list) {
2079 unsigned long flags = hdev->flags;
2080
2081 /* When the auto-off is configured it means the transport
2082 * is running, but in that case still indicate that the
2083 * device is actually down.
2084 */
2085 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2086 flags &= ~BIT(HCI_UP);
2087
2088 (dr + n)->dev_id = hdev->id;
2089 (dr + n)->dev_opt = flags;
2090
2091 if (++n >= dev_num)
2092 break;
2093 }
2094 read_unlock(&hci_dev_list_lock);
2095
2096 dl->dev_num = n;
2097 size = sizeof(*dl) + n * sizeof(*dr);
2098
2099 err = copy_to_user(arg, dl, size);
2100 kfree(dl);
2101
2102 return err ? -EFAULT : 0;
2103 }
2104
2105 int hci_get_dev_info(void __user *arg)
2106 {
2107 struct hci_dev *hdev;
2108 struct hci_dev_info di;
2109 unsigned long flags;
2110 int err = 0;
2111
2112 if (copy_from_user(&di, arg, sizeof(di)))
2113 return -EFAULT;
2114
2115 hdev = hci_dev_get(di.dev_id);
2116 if (!hdev)
2117 return -ENODEV;
2118
2119 /* When the auto-off is configured it means the transport
2120 * is running, but in that case still indicate that the
2121 * device is actually down.
2122 */
2123 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2124 flags = hdev->flags & ~BIT(HCI_UP);
2125 else
2126 flags = hdev->flags;
2127
2128 strcpy(di.name, hdev->name);
2129 di.bdaddr = hdev->bdaddr;
2130 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2131 di.flags = flags;
2132 di.pkt_type = hdev->pkt_type;
2133 if (lmp_bredr_capable(hdev)) {
2134 di.acl_mtu = hdev->acl_mtu;
2135 di.acl_pkts = hdev->acl_pkts;
2136 di.sco_mtu = hdev->sco_mtu;
2137 di.sco_pkts = hdev->sco_pkts;
2138 } else {
2139 di.acl_mtu = hdev->le_mtu;
2140 di.acl_pkts = hdev->le_pkts;
2141 di.sco_mtu = 0;
2142 di.sco_pkts = 0;
2143 }
2144 di.link_policy = hdev->link_policy;
2145 di.link_mode = hdev->link_mode;
2146
2147 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2148 memcpy(&di.features, &hdev->features, sizeof(di.features));
2149
2150 if (copy_to_user(arg, &di, sizeof(di)))
2151 err = -EFAULT;
2152
2153 hci_dev_put(hdev);
2154
2155 return err;
2156 }
2157
2158 /* ---- Interface to HCI drivers ---- */
2159
2160 static int hci_rfkill_set_block(void *data, bool blocked)
2161 {
2162 struct hci_dev *hdev = data;
2163
2164 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2165
2166 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2167 return -EBUSY;
2168
2169 if (blocked) {
2170 hci_dev_set_flag(hdev, HCI_RFKILLED);
2171 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2172 !hci_dev_test_flag(hdev, HCI_CONFIG))
2173 hci_dev_do_close(hdev);
2174 } else {
2175 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2176 }
2177
2178 return 0;
2179 }
2180
2181 static const struct rfkill_ops hci_rfkill_ops = {
2182 .set_block = hci_rfkill_set_block,
2183 };
2184
2185 static void hci_power_on(struct work_struct *work)
2186 {
2187 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2188 int err;
2189
2190 BT_DBG("%s", hdev->name);
2191
2192 err = hci_dev_do_open(hdev);
2193 if (err < 0) {
2194 hci_dev_lock(hdev);
2195 mgmt_set_powered_failed(hdev, err);
2196 hci_dev_unlock(hdev);
2197 return;
2198 }
2199
2200 /* During the HCI setup phase, a few error conditions are
2201 * ignored and they need to be checked now. If they are still
2202 * valid, it is important to turn the device back off.
2203 */
2204 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2205 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2206 (hdev->dev_type == HCI_BREDR &&
2207 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2208 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2209 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2210 hci_dev_do_close(hdev);
2211 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2212 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2213 HCI_AUTO_OFF_TIMEOUT);
2214 }
2215
2216 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2217 /* For unconfigured devices, set the HCI_RAW flag
2218 * so that userspace can easily identify them.
2219 */
2220 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2221 set_bit(HCI_RAW, &hdev->flags);
2222
2223 /* For fully configured devices, this will send
2224 * the Index Added event. For unconfigured devices,
2225 * it will send Unconfigured Index Added event.
2226 *
2227 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2228 * and no event will be sent.
2229 */
2230 mgmt_index_added(hdev);
2231 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2232 /* Once the controller is configured, it is
2233 * important to clear the HCI_RAW flag.
2234 */
2235 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2236 clear_bit(HCI_RAW, &hdev->flags);
2237
2238 /* Powering on the controller with HCI_CONFIG set only
2239 * happens with the transition from unconfigured to
2240 * configured. This will send the Index Added event.
2241 */
2242 mgmt_index_added(hdev);
2243 }
2244 }
2245
2246 static void hci_power_off(struct work_struct *work)
2247 {
2248 struct hci_dev *hdev = container_of(work, struct hci_dev,
2249 power_off.work);
2250
2251 BT_DBG("%s", hdev->name);
2252
2253 hci_dev_do_close(hdev);
2254 }
2255
2256 static void hci_error_reset(struct work_struct *work)
2257 {
2258 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2259
2260 BT_DBG("%s", hdev->name);
2261
2262 if (hdev->hw_error)
2263 hdev->hw_error(hdev, hdev->hw_error_code);
2264 else
2265 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2266 hdev->hw_error_code);
2267
2268 if (hci_dev_do_close(hdev))
2269 return;
2270
2271 hci_dev_do_open(hdev);
2272 }
2273
2274 static void hci_discov_off(struct work_struct *work)
2275 {
2276 struct hci_dev *hdev;
2277
2278 hdev = container_of(work, struct hci_dev, discov_off.work);
2279
2280 BT_DBG("%s", hdev->name);
2281
2282 mgmt_discoverable_timeout(hdev);
2283 }
2284
2285 static void hci_adv_timeout_expire(struct work_struct *work)
2286 {
2287 struct hci_dev *hdev;
2288
2289 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2290
2291 BT_DBG("%s", hdev->name);
2292
2293 mgmt_adv_timeout_expired(hdev);
2294 }
2295
2296 void hci_uuids_clear(struct hci_dev *hdev)
2297 {
2298 struct bt_uuid *uuid, *tmp;
2299
2300 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2301 list_del(&uuid->list);
2302 kfree(uuid);
2303 }
2304 }
2305
2306 void hci_link_keys_clear(struct hci_dev *hdev)
2307 {
2308 struct link_key *key;
2309
2310 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2311 list_del_rcu(&key->list);
2312 kfree_rcu(key, rcu);
2313 }
2314 }
2315
2316 void hci_smp_ltks_clear(struct hci_dev *hdev)
2317 {
2318 struct smp_ltk *k;
2319
2320 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2321 list_del_rcu(&k->list);
2322 kfree_rcu(k, rcu);
2323 }
2324 }
2325
2326 void hci_smp_irks_clear(struct hci_dev *hdev)
2327 {
2328 struct smp_irk *k;
2329
2330 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2331 list_del_rcu(&k->list);
2332 kfree_rcu(k, rcu);
2333 }
2334 }
2335
2336 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2337 {
2338 struct link_key *k;
2339
2340 rcu_read_lock();
2341 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2342 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2343 rcu_read_unlock();
2344 return k;
2345 }
2346 }
2347 rcu_read_unlock();
2348
2349 return NULL;
2350 }
2351
2352 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2353 u8 key_type, u8 old_key_type)
2354 {
2355 /* Legacy key */
2356 if (key_type < 0x03)
2357 return true;
2358
2359 /* Debug keys are insecure so don't store them persistently */
2360 if (key_type == HCI_LK_DEBUG_COMBINATION)
2361 return false;
2362
2363 /* Changed combination key and there's no previous one */
2364 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2365 return false;
2366
2367 /* Security mode 3 case */
2368 if (!conn)
2369 return true;
2370
2371 /* BR/EDR key derived using SC from an LE link */
2372 if (conn->type == LE_LINK)
2373 return true;
2374
2375 /* Neither local nor remote side had no-bonding as requirement */
2376 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2377 return true;
2378
2379 /* Local side had dedicated bonding as requirement */
2380 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2381 return true;
2382
2383 /* Remote side had dedicated bonding as requirement */
2384 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2385 return true;
2386
2387 /* If none of the above criteria match, then don't store the key
2388 * persistently */
2389 return false;
2390 }
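
/* Worked example for the rules above (illustrative only): an unauthenticated
 * combination key (0x04) negotiated while both sides asked for some form of
 * bonding (auth requirements greater than 0x01) is reported as persistent; a
 * debug combination key is never persistent, and neither is a changed
 * combination key when no previous key was stored (old_key_type == 0xff).
 */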
2391
2392 static u8 ltk_role(u8 type)
2393 {
2394 if (type == SMP_LTK)
2395 return HCI_ROLE_MASTER;
2396
2397 return HCI_ROLE_SLAVE;
2398 }
2399
2400 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type, u8 role)
2402 {
2403 struct smp_ltk *k;
2404
2405 rcu_read_lock();
2406 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2407 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2408 continue;
2409
2410 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2411 rcu_read_unlock();
2412 return k;
2413 }
2414 }
2415 rcu_read_unlock();
2416
2417 return NULL;
2418 }
2419
2420 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2421 {
2422 struct smp_irk *irk;
2423
2424 rcu_read_lock();
2425 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2426 if (!bacmp(&irk->rpa, rpa)) {
2427 rcu_read_unlock();
2428 return irk;
2429 }
2430 }
2431
2432 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2433 if (smp_irk_matches(hdev, irk->val, rpa)) {
2434 bacpy(&irk->rpa, rpa);
2435 rcu_read_unlock();
2436 return irk;
2437 }
2438 }
2439 rcu_read_unlock();
2440
2441 return NULL;
2442 }
2443
2444 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2445 u8 addr_type)
2446 {
2447 struct smp_irk *irk;
2448
2449 /* Identity Address must be public or static random (a static random address has its two most significant bits set) */
2450 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2451 return NULL;
2452
2453 rcu_read_lock();
2454 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2455 if (addr_type == irk->addr_type &&
2456 bacmp(bdaddr, &irk->bdaddr) == 0) {
2457 rcu_read_unlock();
2458 return irk;
2459 }
2460 }
2461 rcu_read_unlock();
2462
2463 return NULL;
2464 }
2465
2466 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2467 bdaddr_t *bdaddr, u8 *val, u8 type,
2468 u8 pin_len, bool *persistent)
2469 {
2470 struct link_key *key, *old_key;
2471 u8 old_key_type;
2472
2473 old_key = hci_find_link_key(hdev, bdaddr);
2474 if (old_key) {
2475 old_key_type = old_key->type;
2476 key = old_key;
2477 } else {
2478 old_key_type = conn ? conn->key_type : 0xff;
2479 key = kzalloc(sizeof(*key), GFP_KERNEL);
2480 if (!key)
2481 return NULL;
2482 list_add_rcu(&key->list, &hdev->link_keys);
2483 }
2484
2485 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2486
2487 /* Some buggy controller combinations generate a changed
2488 * combination key for legacy pairing even when there's no
2489 * previous key */
2490 if (type == HCI_LK_CHANGED_COMBINATION &&
2491 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2492 type = HCI_LK_COMBINATION;
2493 if (conn)
2494 conn->key_type = type;
2495 }
2496
2497 bacpy(&key->bdaddr, bdaddr);
2498 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2499 key->pin_len = pin_len;
2500
2501 if (type == HCI_LK_CHANGED_COMBINATION)
2502 key->type = old_key_type;
2503 else
2504 key->type = type;
2505
2506 if (persistent)
2507 *persistent = hci_persistent_key(hdev, conn, type,
2508 old_key_type);
2509
2510 return key;
2511 }
2512
2513 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2514 u8 addr_type, u8 type, u8 authenticated,
2515 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2516 {
2517 struct smp_ltk *key, *old_key;
2518 u8 role = ltk_role(type);
2519
2520 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2521 if (old_key)
2522 key = old_key;
2523 else {
2524 key = kzalloc(sizeof(*key), GFP_KERNEL);
2525 if (!key)
2526 return NULL;
2527 list_add_rcu(&key->list, &hdev->long_term_keys);
2528 }
2529
2530 bacpy(&key->bdaddr, bdaddr);
2531 key->bdaddr_type = addr_type;
2532 memcpy(key->val, tk, sizeof(key->val));
2533 key->authenticated = authenticated;
2534 key->ediv = ediv;
2535 key->rand = rand;
2536 key->enc_size = enc_size;
2537 key->type = type;
2538
2539 return key;
2540 }
2541
2542 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2544 {
2545 struct smp_irk *irk;
2546
2547 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2548 if (!irk) {
2549 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2550 if (!irk)
2551 return NULL;
2552
2553 bacpy(&irk->bdaddr, bdaddr);
2554 irk->addr_type = addr_type;
2555
2556 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2557 }
2558
2559 memcpy(irk->val, val, 16);
2560 bacpy(&irk->rpa, rpa);
2561
2562 return irk;
2563 }
2564
2565 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2566 {
2567 struct link_key *key;
2568
2569 key = hci_find_link_key(hdev, bdaddr);
2570 if (!key)
2571 return -ENOENT;
2572
2573 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2574
2575 list_del_rcu(&key->list);
2576 kfree_rcu(key, rcu);
2577
2578 return 0;
2579 }
2580
2581 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2582 {
2583 struct smp_ltk *k;
2584 int removed = 0;
2585
2586 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2587 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2588 continue;
2589
2590 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2591
2592 list_del_rcu(&k->list);
2593 kfree_rcu(k, rcu);
2594 removed++;
2595 }
2596
2597 return removed ? 0 : -ENOENT;
2598 }
2599
2600 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2601 {
2602 struct smp_irk *k;
2603
2604 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2605 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2606 continue;
2607
2608 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2609
2610 list_del_rcu(&k->list);
2611 kfree_rcu(k, rcu);
2612 }
2613 }
2614
2615 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2616 {
2617 struct smp_ltk *k;
2618 struct smp_irk *irk;
2619 u8 addr_type;
2620
2621 if (type == BDADDR_BREDR) {
2622 if (hci_find_link_key(hdev, bdaddr))
2623 return true;
2624 return false;
2625 }
2626
2627 /* Convert to HCI addr type which struct smp_ltk uses */
2628 if (type == BDADDR_LE_PUBLIC)
2629 addr_type = ADDR_LE_DEV_PUBLIC;
2630 else
2631 addr_type = ADDR_LE_DEV_RANDOM;
2632
2633 irk = hci_get_irk(hdev, bdaddr, addr_type);
2634 if (irk) {
2635 bdaddr = &irk->bdaddr;
2636 addr_type = irk->addr_type;
2637 }
2638
2639 rcu_read_lock();
2640 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2641 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2642 rcu_read_unlock();
2643 return true;
2644 }
2645 }
2646 rcu_read_unlock();
2647
2648 return false;
2649 }
2650
2651 /* HCI command timer function */
2652 static void hci_cmd_timeout(struct work_struct *work)
2653 {
2654 struct hci_dev *hdev = container_of(work, struct hci_dev,
2655 cmd_timer.work);
2656
2657 if (hdev->sent_cmd) {
2658 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2659 u16 opcode = __le16_to_cpu(sent->opcode);
2660
2661 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2662 } else {
2663 BT_ERR("%s command tx timeout", hdev->name);
2664 }
2665
2666 atomic_set(&hdev->cmd_cnt, 1);
2667 queue_work(hdev->workqueue, &hdev->cmd_work);
2668 }
2669
2670 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2671 bdaddr_t *bdaddr, u8 bdaddr_type)
2672 {
2673 struct oob_data *data;
2674
2675 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2676 if (bacmp(bdaddr, &data->bdaddr) != 0)
2677 continue;
2678 if (data->bdaddr_type != bdaddr_type)
2679 continue;
2680 return data;
2681 }
2682
2683 return NULL;
2684 }
2685
2686 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2687 u8 bdaddr_type)
2688 {
2689 struct oob_data *data;
2690
2691 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2692 if (!data)
2693 return -ENOENT;
2694
2695 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2696
2697 list_del(&data->list);
2698 kfree(data);
2699
2700 return 0;
2701 }
2702
2703 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2704 {
2705 struct oob_data *data, *n;
2706
2707 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2708 list_del(&data->list);
2709 kfree(data);
2710 }
2711 }
2712
2713 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2715 u8 *hash256, u8 *rand256)
2716 {
2717 struct oob_data *data;
2718
2719 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2720 if (!data) {
2721 data = kmalloc(sizeof(*data), GFP_KERNEL);
2722 if (!data)
2723 return -ENOMEM;
2724
2725 bacpy(&data->bdaddr, bdaddr);
2726 data->bdaddr_type = bdaddr_type;
2727 list_add(&data->list, &hdev->remote_oob_data);
2728 }
2729
2730 if (hash192 && rand192) {
2731 memcpy(data->hash192, hash192, sizeof(data->hash192));
2732 memcpy(data->rand192, rand192, sizeof(data->rand192));
2733 if (hash256 && rand256)
2734 data->present = 0x03;
2735 } else {
2736 memset(data->hash192, 0, sizeof(data->hash192));
2737 memset(data->rand192, 0, sizeof(data->rand192));
2738 if (hash256 && rand256)
2739 data->present = 0x02;
2740 else
2741 data->present = 0x00;
2742 }
2743
2744 if (hash256 && rand256) {
2745 memcpy(data->hash256, hash256, sizeof(data->hash256));
2746 memcpy(data->rand256, rand256, sizeof(data->rand256));
2747 } else {
2748 memset(data->hash256, 0, sizeof(data->hash256));
2749 memset(data->rand256, 0, sizeof(data->rand256));
2750 if (hash192 && rand192)
2751 data->present = 0x01;
2752 }
2753
2754 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2755
2756 return 0;
2757 }
2758
2759 /* This function requires the caller holds hdev->lock */
2760 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2761 {
2762 struct adv_info *adv_instance;
2763
2764 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2765 if (adv_instance->instance == instance)
2766 return adv_instance;
2767 }
2768
2769 return NULL;
2770 }
2771
2772 /* This function requires the caller holds hdev->lock */
2773 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2774 struct adv_info *cur_instance;
2775
2776 cur_instance = hci_find_adv_instance(hdev, instance);
2777 if (!cur_instance)
2778 return NULL;
2779
2780 if (cur_instance == list_last_entry(&hdev->adv_instances,
2781 struct adv_info, list))
2782 return list_first_entry(&hdev->adv_instances,
2783 struct adv_info, list);
2784 else
2785 return list_next_entry(cur_instance, list);
2786 }
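
/* Note (illustrative): the lookup above is circular. Once the current
 * instance is the last entry of hdev->adv_instances, the first entry is
 * returned again, which lets the advertising code rotate through all
 * registered instances indefinitely.
 */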
2787
2788 /* This function requires the caller holds hdev->lock */
2789 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2790 {
2791 struct adv_info *adv_instance;
2792
2793 adv_instance = hci_find_adv_instance(hdev, instance);
2794 if (!adv_instance)
2795 return -ENOENT;
2796
2797 BT_DBG("%s removing %dMR", hdev->name, instance);
2798
2799 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2800 cancel_delayed_work(&hdev->adv_instance_expire);
2801 hdev->adv_instance_timeout = 0;
2802 }
2803
2804 list_del(&adv_instance->list);
2805 kfree(adv_instance);
2806
2807 hdev->adv_instance_cnt--;
2808
2809 return 0;
2810 }
2811
2812 /* This function requires the caller holds hdev->lock */
2813 void hci_adv_instances_clear(struct hci_dev *hdev)
2814 {
2815 struct adv_info *adv_instance, *n;
2816
2817 if (hdev->adv_instance_timeout) {
2818 cancel_delayed_work(&hdev->adv_instance_expire);
2819 hdev->adv_instance_timeout = 0;
2820 }
2821
2822 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2823 list_del(&adv_instance->list);
2824 kfree(adv_instance);
2825 }
2826
2827 hdev->adv_instance_cnt = 0;
2828 }
2829
2830 /* This function requires the caller holds hdev->lock */
2831 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2832 u16 adv_data_len, u8 *adv_data,
2833 u16 scan_rsp_len, u8 *scan_rsp_data,
2834 u16 timeout, u16 duration)
2835 {
2836 struct adv_info *adv_instance;
2837
2838 adv_instance = hci_find_adv_instance(hdev, instance);
2839 if (adv_instance) {
2840 memset(adv_instance->adv_data, 0,
2841 sizeof(adv_instance->adv_data));
2842 memset(adv_instance->scan_rsp_data, 0,
2843 sizeof(adv_instance->scan_rsp_data));
2844 } else {
2845 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2846 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2847 return -EOVERFLOW;
2848
2849 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2850 if (!adv_instance)
2851 return -ENOMEM;
2852
2853 adv_instance->pending = true;
2854 adv_instance->instance = instance;
2855 list_add(&adv_instance->list, &hdev->adv_instances);
2856 hdev->adv_instance_cnt++;
2857 }
2858
2859 adv_instance->flags = flags;
2860 adv_instance->adv_data_len = adv_data_len;
2861 adv_instance->scan_rsp_len = scan_rsp_len;
2862
2863 if (adv_data_len)
2864 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2865
2866 if (scan_rsp_len)
2867 memcpy(adv_instance->scan_rsp_data,
2868 scan_rsp_data, scan_rsp_len);
2869
2870 adv_instance->timeout = timeout;
2871 adv_instance->remaining_time = timeout;
2872
2873 if (duration == 0)
2874 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2875 else
2876 adv_instance->duration = duration;
2877
2878 BT_DBG("%s for %dMR", hdev->name, instance);
2879
2880 return 0;
2881 }
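
/* Example: a caller registering a flags-only advertising instance would do
 * so with hdev->lock held, roughly as sketched below. The instance number,
 * data and timeouts are made-up values for illustration only.
 *
 *	static int example_add_adv(struct hci_dev *hdev)
 *	{
 *		u8 adv_data[] = { 0x02, 0x01, 0x06 };	// Flags AD structure
 *		int err;
 *
 *		hci_dev_lock(hdev);
 *		err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data),
 *					   adv_data, 0, NULL, 0, 0);
 *		hci_dev_unlock(hdev);
 *
 *		return err;
 *	}
 */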
2882
2883 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2884 bdaddr_t *bdaddr, u8 type)
2885 {
2886 struct bdaddr_list *b;
2887
2888 list_for_each_entry(b, bdaddr_list, list) {
2889 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2890 return b;
2891 }
2892
2893 return NULL;
2894 }
2895
2896 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2897 {
2898 struct list_head *p, *n;
2899
2900 list_for_each_safe(p, n, bdaddr_list) {
2901 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2902
2903 list_del(p);
2904 kfree(b);
2905 }
2906 }
2907
2908 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2909 {
2910 struct bdaddr_list *entry;
2911
2912 if (!bacmp(bdaddr, BDADDR_ANY))
2913 return -EBADF;
2914
2915 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2916 return -EEXIST;
2917
2918 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2919 if (!entry)
2920 return -ENOMEM;
2921
2922 bacpy(&entry->bdaddr, bdaddr);
2923 entry->bdaddr_type = type;
2924
2925 list_add(&entry->list, list);
2926
2927 return 0;
2928 }
2929
2930 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2931 {
2932 struct bdaddr_list *entry;
2933
2934 if (!bacmp(bdaddr, BDADDR_ANY)) {
2935 hci_bdaddr_list_clear(list);
2936 return 0;
2937 }
2938
2939 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2940 if (!entry)
2941 return -ENOENT;
2942
2943 list_del(&entry->list);
2944 kfree(entry);
2945
2946 return 0;
2947 }
2948
2949 /* This function requires the caller holds hdev->lock */
2950 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2951 bdaddr_t *addr, u8 addr_type)
2952 {
2953 struct hci_conn_params *params;
2954
2955 list_for_each_entry(params, &hdev->le_conn_params, list) {
2956 if (bacmp(&params->addr, addr) == 0 &&
2957 params->addr_type == addr_type) {
2958 return params;
2959 }
2960 }
2961
2962 return NULL;
2963 }
2964
2965 /* This function requires the caller holds hdev->lock */
2966 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2967 bdaddr_t *addr, u8 addr_type)
2968 {
2969 struct hci_conn_params *param;
2970
2971 list_for_each_entry(param, list, action) {
2972 if (bacmp(&param->addr, addr) == 0 &&
2973 param->addr_type == addr_type)
2974 return param;
2975 }
2976
2977 return NULL;
2978 }
2979
2980 /* This function requires the caller holds hdev->lock */
2981 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2982 bdaddr_t *addr, u8 addr_type)
2983 {
2984 struct hci_conn_params *params;
2985
2986 params = hci_conn_params_lookup(hdev, addr, addr_type);
2987 if (params)
2988 return params;
2989
2990 params = kzalloc(sizeof(*params), GFP_KERNEL);
2991 if (!params) {
2992 BT_ERR("Out of memory");
2993 return NULL;
2994 }
2995
2996 bacpy(&params->addr, addr);
2997 params->addr_type = addr_type;
2998
2999 list_add(&params->list, &hdev->le_conn_params);
3000 INIT_LIST_HEAD(&params->action);
3001
3002 params->conn_min_interval = hdev->le_conn_min_interval;
3003 params->conn_max_interval = hdev->le_conn_max_interval;
3004 params->conn_latency = hdev->le_conn_latency;
3005 params->supervision_timeout = hdev->le_supv_timeout;
3006 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3007
3008 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3009
3010 return params;
3011 }
3012
3013 static void hci_conn_params_free(struct hci_conn_params *params)
3014 {
3015 if (params->conn) {
3016 hci_conn_drop(params->conn);
3017 hci_conn_put(params->conn);
3018 }
3019
3020 list_del(&params->action);
3021 list_del(&params->list);
3022 kfree(params);
3023 }
3024
3025 /* This function requires the caller holds hdev->lock */
3026 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3027 {
3028 struct hci_conn_params *params;
3029
3030 params = hci_conn_params_lookup(hdev, addr, addr_type);
3031 if (!params)
3032 return;
3033
3034 hci_conn_params_free(params);
3035
3036 hci_update_background_scan(hdev);
3037
3038 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3039 }
3040
3041 /* This function requires the caller holds hdev->lock */
3042 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3043 {
3044 struct hci_conn_params *params, *tmp;
3045
3046 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3047 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3048 continue;
3049
3050 /* If trying to establish a one time connection to a disabled
3051 * device, leave the params, but mark them as just once.
3052 */
3053 if (params->explicit_connect) {
3054 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3055 continue;
3056 }
3057
3058 list_del(&params->list);
3059 kfree(params);
3060 }
3061
3062 BT_DBG("All LE disabled connection parameters were removed");
3063 }
3064
3065 /* This function requires the caller holds hdev->lock */
3066 void hci_conn_params_clear_all(struct hci_dev *hdev)
3067 {
3068 struct hci_conn_params *params, *tmp;
3069
3070 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3071 hci_conn_params_free(params);
3072
3073 hci_update_background_scan(hdev);
3074
3075 BT_DBG("All LE connection parameters were removed");
3076 }
3077
3078 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3079 {
3080 if (status) {
3081 BT_ERR("Failed to start inquiry: status %d", status);
3082
3083 hci_dev_lock(hdev);
3084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3085 hci_dev_unlock(hdev);
3086 return;
3087 }
3088 }
3089
3090 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3091 u16 opcode)
3092 {
3093 /* General inquiry access code (GIAC) */
3094 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3095 struct hci_cp_inquiry cp;
3096 int err;
3097
3098 if (status) {
3099 BT_ERR("Failed to disable LE scanning: status %d", status);
3100 return;
3101 }
3102
3103 hdev->discovery.scan_start = 0;
3104
3105 switch (hdev->discovery.type) {
3106 case DISCOV_TYPE_LE:
3107 hci_dev_lock(hdev);
3108 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3109 hci_dev_unlock(hdev);
3110 break;
3111
3112 case DISCOV_TYPE_INTERLEAVED:
3113 hci_dev_lock(hdev);
3114
3115 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3116 &hdev->quirks)) {
3117 /* If we were running LE only scan, change discovery
3118 * state. If we were running both LE and BR/EDR inquiry
3119 * simultaneously, and BR/EDR inquiry is already
3120 * finished, stop discovery, otherwise BR/EDR inquiry
3121 * will stop discovery when finished. If we are going to
3122 * resolve a remote device name, do not change discovery state.
3123 */
3124 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3125 hdev->discovery.state != DISCOVERY_RESOLVING)
3126 hci_discovery_set_state(hdev,
3127 DISCOVERY_STOPPED);
3128 } else {
3129 struct hci_request req;
3130
3131 hci_inquiry_cache_flush(hdev);
3132
3133 hci_req_init(&req, hdev);
3134
3135 memset(&cp, 0, sizeof(cp));
3136 memcpy(&cp.lap, lap, sizeof(cp.lap));
3137 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3138 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3139
3140 err = hci_req_run(&req, inquiry_complete);
3141 if (err) {
3142 BT_ERR("Inquiry request failed: err %d", err);
3143 hci_discovery_set_state(hdev,
3144 DISCOVERY_STOPPED);
3145 }
3146 }
3147
3148 hci_dev_unlock(hdev);
3149 break;
3150 }
3151 }
3152
3153 static void le_scan_disable_work(struct work_struct *work)
3154 {
3155 struct hci_dev *hdev = container_of(work, struct hci_dev,
3156 le_scan_disable.work);
3157 struct hci_request req;
3158 int err;
3159
3160 BT_DBG("%s", hdev->name);
3161
3162 cancel_delayed_work_sync(&hdev->le_scan_restart);
3163
3164 hci_req_init(&req, hdev);
3165
3166 hci_req_add_le_scan_disable(&req);
3167
3168 err = hci_req_run(&req, le_scan_disable_work_complete);
3169 if (err)
3170 BT_ERR("Disable LE scanning request failed: err %d", err);
3171 }
3172
3173 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3174 u16 opcode)
3175 {
3176 unsigned long timeout, duration, scan_start, now;
3177
3178 BT_DBG("%s", hdev->name);
3179
3180 if (status) {
3181 BT_ERR("Failed to restart LE scan: status %d", status);
3182 return;
3183 }
3184
3185 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3186 !hdev->discovery.scan_start)
3187 return;
3188
3189 /* When the scan was started, hdev->le_scan_disable has been queued
3190 * after duration from scan_start. During scan restart this job
3191 * has been canceled, and we need to queue it again after proper
3192 * timeout, to make sure that scan does not run indefinitely.
3193 */
3194 duration = hdev->discovery.scan_duration;
3195 scan_start = hdev->discovery.scan_start;
3196 now = jiffies;
3197 if (now - scan_start <= duration) {
3198 int elapsed;
3199
3200 if (now >= scan_start)
3201 elapsed = now - scan_start;
3202 else
3203 elapsed = ULONG_MAX - scan_start + now;
3204
3205 timeout = duration - elapsed;
3206 } else {
3207 timeout = 0;
3208 }
3209 queue_delayed_work(hdev->workqueue,
3210 &hdev->le_scan_disable, timeout);
3211 }
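
/* Worked example for the timeout above (illustrative, assuming HZ == 1000):
 * with a scan_duration of 10240 jiffies and a restart 4000 jiffies after
 * scan_start, le_scan_disable is re-queued with a timeout of
 * 10240 - 4000 = 6240 jiffies, so the overall scan window is preserved. If
 * the duration has already elapsed when the restart completes, the timeout
 * becomes 0 and scanning is disabled right away.
 */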
3212
3213 static void le_scan_restart_work(struct work_struct *work)
3214 {
3215 struct hci_dev *hdev = container_of(work, struct hci_dev,
3216 le_scan_restart.work);
3217 struct hci_request req;
3218 struct hci_cp_le_set_scan_enable cp;
3219 int err;
3220
3221 BT_DBG("%s", hdev->name);
3222
3223 /* If controller is not scanning we are done. */
3224 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3225 return;
3226
3227 hci_req_init(&req, hdev);
3228
3229 hci_req_add_le_scan_disable(&req);
3230
3231 memset(&cp, 0, sizeof(cp));
3232 cp.enable = LE_SCAN_ENABLE;
3233 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3234 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3235
3236 err = hci_req_run(&req, le_scan_restart_work_complete);
3237 if (err)
3238 BT_ERR("Restart LE scan request failed: err %d", err);
3239 }
3240
3241 /* Copy the Identity Address of the controller.
3242 *
3243 * If the controller has a public BD_ADDR, then by default use that one.
3244 * If this is a LE only controller without a public address, default to
3245 * the static random address.
3246 *
3247 * For debugging purposes it is possible to force controllers with a
3248 * public address to use the static random address instead.
3249 *
3250 * In case BR/EDR has been disabled on a dual-mode controller and
3251 * userspace has configured a static address, then that address
3252 * becomes the identity address instead of the public BR/EDR address.
3253 */
3254 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3255 u8 *bdaddr_type)
3256 {
3257 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3258 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3259 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3260 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3261 bacpy(bdaddr, &hdev->static_addr);
3262 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3263 } else {
3264 bacpy(bdaddr, &hdev->bdaddr);
3265 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3266 }
3267 }
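
/* Examples of the selection above (illustrative): a dual-mode controller
 * with a public BD_ADDR, BR/EDR enabled and HCI_FORCE_STATIC_ADDR unset
 * yields (hdev->bdaddr, ADDR_LE_DEV_PUBLIC); an LE-only controller whose
 * public address is BDADDR_ANY but which has a configured static address
 * yields (hdev->static_addr, ADDR_LE_DEV_RANDOM).
 */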
3268
3269 /* Alloc HCI device */
3270 struct hci_dev *hci_alloc_dev(void)
3271 {
3272 struct hci_dev *hdev;
3273
3274 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3275 if (!hdev)
3276 return NULL;
3277
3278 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3279 hdev->esco_type = (ESCO_HV1);
3280 hdev->link_mode = (HCI_LM_ACCEPT);
3281 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3282 hdev->io_capability = 0x03; /* No Input No Output */
3283 hdev->manufacturer = 0xffff; /* Default to internal use */
3284 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3285 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3286 hdev->adv_instance_cnt = 0;
3287 hdev->cur_adv_instance = 0x00;
3288 hdev->adv_instance_timeout = 0;
3289
3290 hdev->sniff_max_interval = 800;
3291 hdev->sniff_min_interval = 80;
3292
3293 hdev->le_adv_channel_map = 0x07;
3294 hdev->le_adv_min_interval = 0x0800;
3295 hdev->le_adv_max_interval = 0x0800;
3296 hdev->le_scan_interval = 0x0060;
3297 hdev->le_scan_window = 0x0030;
3298 hdev->le_conn_min_interval = 0x0028;
3299 hdev->le_conn_max_interval = 0x0038;
3300 hdev->le_conn_latency = 0x0000;
3301 hdev->le_supv_timeout = 0x002a;
3302 hdev->le_def_tx_len = 0x001b;
3303 hdev->le_def_tx_time = 0x0148;
3304 hdev->le_max_tx_len = 0x001b;
3305 hdev->le_max_tx_time = 0x0148;
3306 hdev->le_max_rx_len = 0x001b;
3307 hdev->le_max_rx_time = 0x0148;
3308
3309 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3310 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3311 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3312 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3313
3314 mutex_init(&hdev->lock);
3315 mutex_init(&hdev->req_lock);
3316
3317 INIT_LIST_HEAD(&hdev->mgmt_pending);
3318 INIT_LIST_HEAD(&hdev->blacklist);
3319 INIT_LIST_HEAD(&hdev->whitelist);
3320 INIT_LIST_HEAD(&hdev->uuids);
3321 INIT_LIST_HEAD(&hdev->link_keys);
3322 INIT_LIST_HEAD(&hdev->long_term_keys);
3323 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3324 INIT_LIST_HEAD(&hdev->remote_oob_data);
3325 INIT_LIST_HEAD(&hdev->le_white_list);
3326 INIT_LIST_HEAD(&hdev->le_conn_params);
3327 INIT_LIST_HEAD(&hdev->pend_le_conns);
3328 INIT_LIST_HEAD(&hdev->pend_le_reports);
3329 INIT_LIST_HEAD(&hdev->conn_hash.list);
3330 INIT_LIST_HEAD(&hdev->adv_instances);
3331
3332 INIT_WORK(&hdev->rx_work, hci_rx_work);
3333 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3334 INIT_WORK(&hdev->tx_work, hci_tx_work);
3335 INIT_WORK(&hdev->power_on, hci_power_on);
3336 INIT_WORK(&hdev->error_reset, hci_error_reset);
3337
3338 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3339 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3340 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3341 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3342 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3343
3344 skb_queue_head_init(&hdev->rx_q);
3345 skb_queue_head_init(&hdev->cmd_q);
3346 skb_queue_head_init(&hdev->raw_q);
3347
3348 init_waitqueue_head(&hdev->req_wait_q);
3349
3350 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3351
3352 hci_init_sysfs(hdev);
3353 discovery_init(hdev);
3354
3355 return hdev;
3356 }
3357 EXPORT_SYMBOL(hci_alloc_dev);
3358
3359 /* Free HCI device */
3360 void hci_free_dev(struct hci_dev *hdev)
3361 {
3362 /* will free via device release */
3363 put_device(&hdev->dev);
3364 }
3365 EXPORT_SYMBOL(hci_free_dev);
3366
3367 /* Register HCI device */
3368 int hci_register_dev(struct hci_dev *hdev)
3369 {
3370 int id, error;
3371
3372 if (!hdev->open || !hdev->close || !hdev->send)
3373 return -EINVAL;
3374
3375 /* Do not allow HCI_AMP devices to register at index 0,
3376 * so the index can be used as the AMP controller ID.
3377 */
3378 switch (hdev->dev_type) {
3379 case HCI_BREDR:
3380 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3381 break;
3382 case HCI_AMP:
3383 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3384 break;
3385 default:
3386 return -EINVAL;
3387 }
3388
3389 if (id < 0)
3390 return id;
3391
3392 sprintf(hdev->name, "hci%d", id);
3393 hdev->id = id;
3394
3395 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3396
3397 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3398 WQ_MEM_RECLAIM, 1, hdev->name);
3399 if (!hdev->workqueue) {
3400 error = -ENOMEM;
3401 goto err;
3402 }
3403
3404 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3405 WQ_MEM_RECLAIM, 1, hdev->name);
3406 if (!hdev->req_workqueue) {
3407 destroy_workqueue(hdev->workqueue);
3408 error = -ENOMEM;
3409 goto err;
3410 }
3411
3412 if (!IS_ERR_OR_NULL(bt_debugfs))
3413 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3414
3415 dev_set_name(&hdev->dev, "%s", hdev->name);
3416
3417 error = device_add(&hdev->dev);
3418 if (error < 0)
3419 goto err_wqueue;
3420
3421 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3422 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3423 hdev);
3424 if (hdev->rfkill) {
3425 if (rfkill_register(hdev->rfkill) < 0) {
3426 rfkill_destroy(hdev->rfkill);
3427 hdev->rfkill = NULL;
3428 }
3429 }
3430
3431 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3432 hci_dev_set_flag(hdev, HCI_RFKILLED);
3433
3434 hci_dev_set_flag(hdev, HCI_SETUP);
3435 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3436
3437 if (hdev->dev_type == HCI_BREDR) {
3438 /* Assume BR/EDR support until proven otherwise (such as
3439 * through reading supported features during init).
3440 */
3441 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3442 }
3443
3444 write_lock(&hci_dev_list_lock);
3445 list_add(&hdev->list, &hci_dev_list);
3446 write_unlock(&hci_dev_list_lock);
3447
3448 /* Devices that are marked for raw-only usage are unconfigured
3449 * and should not be included in normal operation.
3450 */
3451 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3452 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3453
3454 hci_sock_dev_event(hdev, HCI_DEV_REG);
3455 hci_dev_hold(hdev);
3456
3457 queue_work(hdev->req_workqueue, &hdev->power_on);
3458
3459 return id;
3460
3461 err_wqueue:
3462 debugfs_remove_recursive(hdev->debugfs);
3463 destroy_workqueue(hdev->workqueue);
3464 destroy_workqueue(hdev->req_workqueue);
3465 err:
3466 ida_simple_remove(&hci_index_ida, hdev->id);
3467
3468 return error;
3469 }
3470 EXPORT_SYMBOL(hci_register_dev);
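
/* Example: the usual driver pattern is to allocate the hci_dev, fill in the
 * mandatory open/close/send callbacks and register it. The my_* names below
 * are hypothetical and only sketch the flow; they are not part of this file.
 *
 *	static int my_driver_probe(struct my_device *dev)
 *	{
 *		struct hci_dev *hdev;
 *		int err;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_USB;		// or HCI_UART, HCI_SDIO, ...
 *		hdev->open  = my_open;
 *		hdev->close = my_close;
 *		hdev->send  = my_send;
 *		hci_set_drvdata(hdev, dev);
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0) {
 *			hci_free_dev(hdev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */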
3471
3472 /* Unregister HCI device */
3473 void hci_unregister_dev(struct hci_dev *hdev)
3474 {
3475 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3476
3477 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3478
3479 write_lock(&hci_dev_list_lock);
3480 list_del(&hdev->list);
3481 write_unlock(&hci_dev_list_lock);
3482
3483 hci_dev_do_close(hdev);
3484
3485 cancel_work_sync(&hdev->power_on);
3486
3487 if (!test_bit(HCI_INIT, &hdev->flags) &&
3488 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3489 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3490 hci_dev_lock(hdev);
3491 mgmt_index_removed(hdev);
3492 hci_dev_unlock(hdev);
3493 }
3494
3495 /* mgmt_index_removed should take care of emptying the
3496 * pending list */
3497 BUG_ON(!list_empty(&hdev->mgmt_pending));
3498
3499 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3500
3501 if (hdev->rfkill) {
3502 rfkill_unregister(hdev->rfkill);
3503 rfkill_destroy(hdev->rfkill);
3504 }
3505
3506 device_del(&hdev->dev);
3507 /* Actual cleanup is deferred until hci_cleanup_dev(). */
3508 hci_dev_put(hdev);
3509 }
3510 EXPORT_SYMBOL(hci_unregister_dev);
3511
3512 /* Cleanup HCI device */
3513 void hci_cleanup_dev(struct hci_dev *hdev)
3514 {
3515 debugfs_remove_recursive(hdev->debugfs);
3516
3517 destroy_workqueue(hdev->workqueue);
3518 destroy_workqueue(hdev->req_workqueue);
3519
3520 hci_dev_lock(hdev);
3521 hci_bdaddr_list_clear(&hdev->blacklist);
3522 hci_bdaddr_list_clear(&hdev->whitelist);
3523 hci_uuids_clear(hdev);
3524 hci_link_keys_clear(hdev);
3525 hci_smp_ltks_clear(hdev);
3526 hci_smp_irks_clear(hdev);
3527 hci_remote_oob_data_clear(hdev);
3528 hci_adv_instances_clear(hdev);
3529 hci_bdaddr_list_clear(&hdev->le_white_list);
3530 hci_conn_params_clear_all(hdev);
3531 hci_discovery_filter_clear(hdev);
3532 hci_dev_unlock(hdev);
3533
3534 ida_simple_remove(&hci_index_ida, hdev->id);
3535 }
3536
3537 /* Suspend HCI device */
3538 int hci_suspend_dev(struct hci_dev *hdev)
3539 {
3540 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3541 return 0;
3542 }
3543 EXPORT_SYMBOL(hci_suspend_dev);
3544
3545 /* Resume HCI device */
3546 int hci_resume_dev(struct hci_dev *hdev)
3547 {
3548 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3549 return 0;
3550 }
3551 EXPORT_SYMBOL(hci_resume_dev);
3552
3553 /* Reset HCI device */
3554 int hci_reset_dev(struct hci_dev *hdev)
3555 {
3556 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3557 struct sk_buff *skb;
3558
3559 skb = bt_skb_alloc(3, GFP_ATOMIC);
3560 if (!skb)
3561 return -ENOMEM;
3562
3563 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3564 memcpy(skb_put(skb, 3), hw_err, 3);
3565
3566 /* Send Hardware Error to upper stack */
3567 return hci_recv_frame(hdev, skb);
3568 }
3569 EXPORT_SYMBOL(hci_reset_dev);
3570
3571 /* Receive frame from HCI drivers */
3572 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3573 {
3574 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3575 && !test_bit(HCI_INIT, &hdev->flags))) {
3576 kfree_skb(skb);
3577 return -ENXIO;
3578 }
3579
3580 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3581 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3582 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3583 kfree_skb(skb);
3584 return -EINVAL;
3585 }
3586
3587 /* Incoming skb */
3588 bt_cb(skb)->incoming = 1;
3589
3590 /* Time stamp */
3591 __net_timestamp(skb);
3592
3593 skb_queue_tail(&hdev->rx_q, skb);
3594 queue_work(hdev->workqueue, &hdev->rx_work);
3595
3596 return 0;
3597 }
3598 EXPORT_SYMBOL(hci_recv_frame);
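
/* Example: a driver handing a received HCI event up to the core allocates an
 * skb, tags the packet type and calls hci_recv_frame(). This is a rough
 * sketch; my_deliver_event() is a hypothetical helper.
 *
 *	static int my_deliver_event(struct hci_dev *hdev, const void *buf,
 *				    unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		memcpy(skb_put(skb, len), buf, len);
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */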
3599
3600 /* Receive diagnostic message from HCI drivers */
3601 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3602 {
3603 /* Mark as diagnostic packet */
3604 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3605
3606 /* Time stamp */
3607 __net_timestamp(skb);
3608
3609 skb_queue_tail(&hdev->rx_q, skb);
3610 queue_work(hdev->workqueue, &hdev->rx_work);
3611
3612 return 0;
3613 }
3614 EXPORT_SYMBOL(hci_recv_diag);
3615
3616 /* ---- Interface to upper protocols ---- */
3617
3618 int hci_register_cb(struct hci_cb *cb)
3619 {
3620 BT_DBG("%p name %s", cb, cb->name);
3621
3622 mutex_lock(&hci_cb_list_lock);
3623 list_add_tail(&cb->list, &hci_cb_list);
3624 mutex_unlock(&hci_cb_list_lock);
3625
3626 return 0;
3627 }
3628 EXPORT_SYMBOL(hci_register_cb);
3629
3630 int hci_unregister_cb(struct hci_cb *cb)
3631 {
3632 BT_DBG("%p name %s", cb, cb->name);
3633
3634 mutex_lock(&hci_cb_list_lock);
3635 list_del(&cb->list);
3636 mutex_unlock(&hci_cb_list_lock);
3637
3638 return 0;
3639 }
3640 EXPORT_SYMBOL(hci_unregister_cb);
3641
3642 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3643 {
3644 int err;
3645
3646 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3647
3648 /* Time stamp */
3649 __net_timestamp(skb);
3650
3651 /* Send copy to monitor */
3652 hci_send_to_monitor(hdev, skb);
3653
3654 if (atomic_read(&hdev->promisc)) {
3655 /* Send copy to the sockets */
3656 hci_send_to_sock(hdev, skb);
3657 }
3658
3659 /* Get rid of skb owner, prior to sending to the driver. */
3660 skb_orphan(skb);
3661
3662 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3663 kfree_skb(skb);
3664 return;
3665 }
3666
3667 err = hdev->send(hdev, skb);
3668 if (err < 0) {
3669 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3670 kfree_skb(skb);
3671 }
3672 }
3673
3674 /* Send HCI command */
3675 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3676 const void *param)
3677 {
3678 struct sk_buff *skb;
3679
3680 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3681
3682 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3683 if (!skb) {
3684 BT_ERR("%s no memory for command", hdev->name);
3685 return -ENOMEM;
3686 }
3687
3688 /* Stand-alone HCI commands must be flagged as
3689 * single-command requests.
3690 */
3691 bt_cb(skb)->hci.req_start = true;
3692
3693 skb_queue_tail(&hdev->cmd_q, skb);
3694 queue_work(hdev->workqueue, &hdev->cmd_work);
3695
3696 return 0;
3697 }
3698
3699 /* Get data from the previously sent command */
3700 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3701 {
3702 struct hci_command_hdr *hdr;
3703
3704 if (!hdev->sent_cmd)
3705 return NULL;
3706
3707 hdr = (void *) hdev->sent_cmd->data;
3708
3709 if (hdr->opcode != cpu_to_le16(opcode))
3710 return NULL;
3711
3712 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3713
3714 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3715 }
3716
3717 /* Send HCI command and wait for command complete event */
3718 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3719 const void *param, u32 timeout)
3720 {
3721 struct sk_buff *skb;
3722
3723 if (!test_bit(HCI_UP, &hdev->flags))
3724 return ERR_PTR(-ENETDOWN);
3725
3726 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3727
3728 hci_req_lock(hdev);
3729 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3730 hci_req_unlock(hdev);
3731
3732 return skb;
3733 }
3734 EXPORT_SYMBOL(hci_cmd_sync);
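
/* Example: reading the controller's BD_ADDR through the synchronous helper
 * above could look roughly like this; the returned skb holds the command
 * complete parameters and must be freed by the caller.
 *
 *	static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *bdaddr)
 *	{
 *		struct hci_rp_read_bd_addr *rp;
 *		struct sk_buff *skb;
 *
 *		skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *				   HCI_CMD_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		rp = (struct hci_rp_read_bd_addr *) skb->data;
 *		if (skb->len < sizeof(*rp) || rp->status) {
 *			kfree_skb(skb);
 *			return -EIO;
 *		}
 *
 *		bacpy(bdaddr, &rp->bdaddr);
 *		kfree_skb(skb);
 *
 *		return 0;
 *	}
 */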
3735
3736 /* Send ACL data */
3737 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3738 {
3739 struct hci_acl_hdr *hdr;
3740 int len = skb->len;
3741
3742 skb_push(skb, HCI_ACL_HDR_SIZE);
3743 skb_reset_transport_header(skb);
3744 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3745 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3746 hdr->dlen = cpu_to_le16(len);
3747 }
3748
3749 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3750 struct sk_buff *skb, __u16 flags)
3751 {
3752 struct hci_conn *conn = chan->conn;
3753 struct hci_dev *hdev = conn->hdev;
3754 struct sk_buff *list;
3755
3756 skb->len = skb_headlen(skb);
3757 skb->data_len = 0;
3758
3759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3760
3761 switch (hdev->dev_type) {
3762 case HCI_BREDR:
3763 hci_add_acl_hdr(skb, conn->handle, flags);
3764 break;
3765 case HCI_AMP:
3766 hci_add_acl_hdr(skb, chan->handle, flags);
3767 break;
3768 default:
3769 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3770 return;
3771 }
3772
3773 list = skb_shinfo(skb)->frag_list;
3774 if (!list) {
3775 /* Non fragmented */
3776 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3777
3778 skb_queue_tail(queue, skb);
3779 } else {
3780 /* Fragmented */
3781 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3782
3783 skb_shinfo(skb)->frag_list = NULL;
3784
3785 /* Queue all fragments atomically. We need to use spin_lock_bh
3786 * here because of 6LoWPAN links, as there this function is
3787 * called from softirq and using normal spin lock could cause
3788 * deadlocks.
3789 */
3790 spin_lock_bh(&queue->lock);
3791
3792 __skb_queue_tail(queue, skb);
3793
3794 flags &= ~ACL_START;
3795 flags |= ACL_CONT;
3796 do {
3797 skb = list; list = list->next;
3798
3799 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3800 hci_add_acl_hdr(skb, conn->handle, flags);
3801
3802 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3803
3804 __skb_queue_tail(queue, skb);
3805 } while (list);
3806
3807 spin_unlock_bh(&queue->lock);
3808 }
3809 }
3810
3811 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3812 {
3813 struct hci_dev *hdev = chan->conn->hdev;
3814
3815 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3816
3817 hci_queue_acl(chan, &chan->data_q, skb, flags);
3818
3819 queue_work(hdev->workqueue, &hdev->tx_work);
3820 }
3821
3822 /* Send SCO data */
3823 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3824 {
3825 struct hci_dev *hdev = conn->hdev;
3826 struct hci_sco_hdr hdr;
3827
3828 BT_DBG("%s len %d", hdev->name, skb->len);
3829
3830 hdr.handle = cpu_to_le16(conn->handle);
3831 hdr.dlen = skb->len;
3832
3833 skb_push(skb, HCI_SCO_HDR_SIZE);
3834 skb_reset_transport_header(skb);
3835 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3836
3837 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3838
3839 skb_queue_tail(&conn->data_q, skb);
3840 queue_work(hdev->workqueue, &hdev->tx_work);
3841 }
3842
3843 /* ---- HCI TX task (outgoing data) ---- */
3844
3845 /* HCI Connection scheduler */
3846 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3847 int *quote)
3848 {
3849 struct hci_conn_hash *h = &hdev->conn_hash;
3850 struct hci_conn *conn = NULL, *c;
3851 unsigned int num = 0, min = ~0;
3852
3853 /* We don't have to lock the device here. Connections are always
3854 * added and removed with the TX task disabled. */
3855
3856 rcu_read_lock();
3857
3858 list_for_each_entry_rcu(c, &h->list, list) {
3859 if (c->type != type || skb_queue_empty(&c->data_q))
3860 continue;
3861
3862 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3863 continue;
3864
3865 num++;
3866
3867 if (c->sent < min) {
3868 min = c->sent;
3869 conn = c;
3870 }
3871
3872 if (hci_conn_num(hdev, type) == num)
3873 break;
3874 }
3875
3876 rcu_read_unlock();
3877
3878 if (conn) {
3879 int cnt, q;
3880
3881 switch (conn->type) {
3882 case ACL_LINK:
3883 cnt = hdev->acl_cnt;
3884 break;
3885 case SCO_LINK:
3886 case ESCO_LINK:
3887 cnt = hdev->sco_cnt;
3888 break;
3889 case LE_LINK:
3890 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3891 break;
3892 default:
3893 cnt = 0;
3894 BT_ERR("Unknown link type");
3895 }
3896
3897 q = cnt / num;
3898 *quote = q ? q : 1;
3899 } else
3900 *quote = 0;
3901
3902 BT_DBG("conn %p quote %d", conn, *quote);
3903 return conn;
3904 }
3905
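/* TX timeout: the controller has stopped completing packets for this
 * link type, so forcibly disconnect every connection that still has
 * unacked data.
 */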
3906 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3907 {
3908 struct hci_conn_hash *h = &hdev->conn_hash;
3909 struct hci_conn *c;
3910
3911 BT_ERR("%s link tx timeout", hdev->name);
3912
3913 rcu_read_lock();
3914
3915 /* Kill stalled connections */
3916 list_for_each_entry_rcu(c, &h->list, list) {
3917 if (c->type == type && c->sent) {
3918 BT_ERR("%s killing stalled connection %pMR",
3919 hdev->name, &c->dst);
3920 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3921 }
3922 }
3923
3924 rcu_read_unlock();
3925 }
3926
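/* Channel-level scheduler used for ACL, AMP and LE traffic. Only
 * channels whose head skb carries the highest pending priority compete;
 * among those, the channel on the connection with the fewest packets in
 * flight wins. *quote is the free buffer count split across the
 * competing channels, at least one.
 */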
3927 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3928 int *quote)
3929 {
3930 struct hci_conn_hash *h = &hdev->conn_hash;
3931 struct hci_chan *chan = NULL;
3932 unsigned int num = 0, min = ~0, cur_prio = 0;
3933 struct hci_conn *conn;
3934 int cnt, q, conn_num = 0;
3935
3936 BT_DBG("%s", hdev->name);
3937
3938 rcu_read_lock();
3939
3940 list_for_each_entry_rcu(conn, &h->list, list) {
3941 struct hci_chan *tmp;
3942
3943 if (conn->type != type)
3944 continue;
3945
3946 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3947 continue;
3948
3949 conn_num++;
3950
3951 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3952 struct sk_buff *skb;
3953
3954 if (skb_queue_empty(&tmp->data_q))
3955 continue;
3956
3957 skb = skb_peek(&tmp->data_q);
3958 if (skb->priority < cur_prio)
3959 continue;
3960
3961 if (skb->priority > cur_prio) {
3962 num = 0;
3963 min = ~0;
3964 cur_prio = skb->priority;
3965 }
3966
3967 num++;
3968
3969 if (conn->sent < min) {
3970 min = conn->sent;
3971 chan = tmp;
3972 }
3973 }
3974
3975 if (hci_conn_num(hdev, type) == conn_num)
3976 break;
3977 }
3978
3979 rcu_read_unlock();
3980
3981 if (!chan)
3982 return NULL;
3983
3984 switch (chan->conn->type) {
3985 case ACL_LINK:
3986 cnt = hdev->acl_cnt;
3987 break;
3988 case AMP_LINK:
3989 cnt = hdev->block_cnt;
3990 break;
3991 case SCO_LINK:
3992 case ESCO_LINK:
3993 cnt = hdev->sco_cnt;
3994 break;
3995 case LE_LINK:
3996 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3997 break;
3998 default:
3999 cnt = 0;
4000 BT_ERR("Unknown link type");
4001 }
4002
4003 q = cnt / num;
4004 *quote = q ? q : 1;
4005 BT_DBG("chan %p quote %d", chan, *quote);
4006 return chan;
4007 }
4008
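/* Called after a TX round in which packets were sent. Channels that got
 * to transmit have their per-round counter reset; channels that still
 * have data queued but sent nothing get their head skb promoted to
 * HCI_PRIO_MAX - 1 so they are not starved by higher-priority traffic.
 */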
4009 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4010 {
4011 struct hci_conn_hash *h = &hdev->conn_hash;
4012 struct hci_conn *conn;
4013 int num = 0;
4014
4015 BT_DBG("%s", hdev->name);
4016
4017 rcu_read_lock();
4018
4019 list_for_each_entry_rcu(conn, &h->list, list) {
4020 struct hci_chan *chan;
4021
4022 if (conn->type != type)
4023 continue;
4024
4025 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4026 continue;
4027
4028 num++;
4029
4030 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4031 struct sk_buff *skb;
4032
4033 if (chan->sent) {
4034 chan->sent = 0;
4035 continue;
4036 }
4037
4038 if (skb_queue_empty(&chan->data_q))
4039 continue;
4040
4041 skb = skb_peek(&chan->data_q);
4042 if (skb->priority >= HCI_PRIO_MAX - 1)
4043 continue;
4044
4045 skb->priority = HCI_PRIO_MAX - 1;
4046
4047 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4048 skb->priority);
4049 }
4050
4051 if (hci_conn_num(hdev, type) == num)
4052 break;
4053 }
4054
4055 rcu_read_unlock();
4056
4057 }
4058
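/* Block-based flow control (AMP): number of controller buffer blocks
 * consumed by the ACL payload, excluding the header.
 */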
4059 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4060 {
4061 /* Calculate count of blocks used by this packet */
4062 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4063 }
4064
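/* If no controller buffers are free and no ACL frame has been sent
 * since acl_last_tx plus HCI_ACL_TX_TIMEOUT, assume the link has
 * stalled and kill the affected connections via hci_link_tx_to().
 */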
4065 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4066 {
4067 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4068 /* ACL tx timeout must be longer than maximum
4069 * link supervision timeout (40.9 seconds) */
4070 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4071 HCI_ACL_TX_TIMEOUT))
4072 hci_link_tx_to(hdev, ACL_LINK);
4073 }
4074 }
4075
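/* Packet-based ACL scheduling: repeatedly ask hci_chan_sent() for the
 * best channel and drain up to its quote, stopping early if a
 * lower-priority skb reaches the head of the queue. Buffer accounting
 * is per packet (hdev->acl_cnt).
 */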
4076 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4077 {
4078 unsigned int cnt = hdev->acl_cnt;
4079 struct hci_chan *chan;
4080 struct sk_buff *skb;
4081 int quote;
4082
4083 __check_timeout(hdev, cnt);
4084
4085 while (hdev->acl_cnt &&
4086 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4087 u32 priority = (skb_peek(&chan->data_q))->priority;
4088 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4089 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4090 skb->len, skb->priority);
4091
4092 /* Stop if priority has changed */
4093 if (skb->priority < priority)
4094 break;
4095
4096 skb = skb_dequeue(&chan->data_q);
4097
4098 hci_conn_enter_active_mode(chan->conn,
4099 bt_cb(skb)->force_active);
4100
4101 hci_send_frame(hdev, skb);
4102 hdev->acl_last_tx = jiffies;
4103
4104 hdev->acl_cnt--;
4105 chan->sent++;
4106 chan->conn->sent++;
4107 }
4108 }
4109
4110 if (cnt != hdev->acl_cnt)
4111 hci_prio_recalculate(hdev, ACL_LINK);
4112 }
4113
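/* Block-based ACL scheduling, used by controllers (typically AMP) that
 * report their buffers in blocks: quotas and the budget are counted in
 * blocks via __get_blocks() instead of packets.
 */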
4114 static void hci_sched_acl_blk(struct hci_dev *hdev)
4115 {
4116 unsigned int cnt = hdev->block_cnt;
4117 struct hci_chan *chan;
4118 struct sk_buff *skb;
4119 int quote;
4120 u8 type;
4121
4122 __check_timeout(hdev, cnt);
4123
4124 BT_DBG("%s", hdev->name);
4125
4126 if (hdev->dev_type == HCI_AMP)
4127 type = AMP_LINK;
4128 else
4129 type = ACL_LINK;
4130
4131 while (hdev->block_cnt > 0 &&
4132 (chan = hci_chan_sent(hdev, type, &quote))) {
4133 u32 priority = (skb_peek(&chan->data_q))->priority;
4134 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4135 int blocks;
4136
4137 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4138 skb->len, skb->priority);
4139
4140 /* Stop if priority has changed */
4141 if (skb->priority < priority)
4142 break;
4143
4144 skb = skb_dequeue(&chan->data_q);
4145
4146 blocks = __get_blocks(hdev, skb);
4147 if (blocks > hdev->block_cnt)
4148 return;
4149
4150 hci_conn_enter_active_mode(chan->conn,
4151 bt_cb(skb)->force_active);
4152
4153 hci_send_frame(hdev, skb);
4154 hdev->acl_last_tx = jiffies;
4155
4156 hdev->block_cnt -= blocks;
4157 quote -= blocks;
4158
4159 chan->sent += blocks;
4160 chan->conn->sent += blocks;
4161 }
4162 }
4163
4164 if (cnt != hdev->block_cnt)
4165 hci_prio_recalculate(hdev, type);
4166 }
4167
4168 static void hci_sched_acl(struct hci_dev *hdev)
4169 {
4170 BT_DBG("%s", hdev->name);
4171
4172 /* No ACL link over BR/EDR controller */
4173 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4174 return;
4175
4176 /* No AMP link over AMP controller */
4177 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4178 return;
4179
4180 switch (hdev->flow_ctl_mode) {
4181 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4182 hci_sched_acl_pkt(hdev);
4183 break;
4184
4185 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4186 hci_sched_acl_blk(hdev);
4187 break;
4188 }
4189 }
4190
4191 /* Schedule SCO */
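/* SCO and eSCO share the same buffer pool (hdev->sco_cnt); each ready
 * connection may send up to its quote of queued frames per round.
 */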
4192 static void hci_sched_sco(struct hci_dev *hdev)
4193 {
4194 struct hci_conn *conn;
4195 struct sk_buff *skb;
4196 int quote;
4197
4198 BT_DBG("%s", hdev->name);
4199
4200 if (!hci_conn_num(hdev, SCO_LINK))
4201 return;
4202
4203 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4204 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4205 BT_DBG("skb %p len %d", skb, skb->len);
4206 hci_send_frame(hdev, skb);
4207
4208 conn->sent++;
4209 if (conn->sent == ~0)
4210 conn->sent = 0;
4211 }
4212 }
4213 }
4214
4215 static void hci_sched_esco(struct hci_dev *hdev)
4216 {
4217 struct hci_conn *conn;
4218 struct sk_buff *skb;
4219 int quote;
4220
4221 BT_DBG("%s", hdev->name);
4222
4223 if (!hci_conn_num(hdev, ESCO_LINK))
4224 return;
4225
4226 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4227 "e))) {
4228 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4229 BT_DBG("skb %p len %d", skb, skb->len);
4230 hci_send_frame(hdev, skb);
4231
4232 conn->sent++;
4233 if (conn->sent == ~0)
4234 conn->sent = 0;
4235 }
4236 }
4237 }
4238
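/* LE scheduling: use the dedicated LE buffer pool when the controller
 * reports one (le_pkts/le_cnt) and fall back to the ACL pool otherwise.
 * The same priority and starvation rules as ACL apply.
 */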
4239 static void hci_sched_le(struct hci_dev *hdev)
4240 {
4241 struct hci_chan *chan;
4242 struct sk_buff *skb;
4243 int quote, cnt, tmp;
4244
4245 BT_DBG("%s", hdev->name);
4246
4247 if (!hci_conn_num(hdev, LE_LINK))
4248 return;
4249
4250 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4251 /* LE tx timeout must be longer than maximum
4252 * link supervision timeout (40.9 seconds) */
4253 if (!hdev->le_cnt && hdev->le_pkts &&
4254 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4255 hci_link_tx_to(hdev, LE_LINK);
4256 }
4257
4258 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4259 tmp = cnt;
4260 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4261 u32 priority = (skb_peek(&chan->data_q))->priority;
4262 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4263 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4264 skb->len, skb->priority);
4265
4266 /* Stop if priority has changed */
4267 if (skb->priority < priority)
4268 break;
4269
4270 skb = skb_dequeue(&chan->data_q);
4271
4272 hci_send_frame(hdev, skb);
4273 hdev->le_last_tx = jiffies;
4274
4275 cnt--;
4276 chan->sent++;
4277 chan->conn->sent++;
4278 }
4279 }
4280
4281 if (hdev->le_pkts)
4282 hdev->le_cnt = cnt;
4283 else
4284 hdev->acl_cnt = cnt;
4285
4286 if (cnt != tmp)
4287 hci_prio_recalculate(hdev, LE_LINK);
4288 }
4289
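/* TX work: run the per-type schedulers (unless userspace owns the
 * device through HCI_USER_CHANNEL) and then flush any raw packets
 * queued on hdev->raw_q.
 */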
4290 static void hci_tx_work(struct work_struct *work)
4291 {
4292 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4293 struct sk_buff *skb;
4294
4295 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4296 hdev->sco_cnt, hdev->le_cnt);
4297
4298 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4299 /* Schedule queues and send stuff to HCI driver */
4300 hci_sched_acl(hdev);
4301 hci_sched_sco(hdev);
4302 hci_sched_esco(hdev);
4303 hci_sched_le(hdev);
4304 }
4305
4306 /* Send next queued raw (unknown type) packet */
4307 while ((skb = skb_dequeue(&hdev->raw_q)))
4308 hci_send_frame(hdev, skb);
4309 }
4310
4311 /* ----- HCI RX task (incoming data processing) ----- */
4312
4313 /* ACL data packet */
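/* Strip the ACL header, split the handle field into handle and flags,
 * and hand the payload to L2CAP for the matching connection; packets
 * for unknown handles are dropped.
 */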
4314 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4315 {
4316 struct hci_acl_hdr *hdr = (void *) skb->data;
4317 struct hci_conn *conn;
4318 __u16 handle, flags;
4319
4320 skb_pull(skb, HCI_ACL_HDR_SIZE);
4321
4322 handle = __le16_to_cpu(hdr->handle);
4323 flags = hci_flags(handle);
4324 handle = hci_handle(handle);
4325
4326 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4327 handle, flags);
4328
4329 hdev->stat.acl_rx++;
4330
4331 hci_dev_lock(hdev);
4332 conn = hci_conn_hash_lookup_handle(hdev, handle);
4333 hci_dev_unlock(hdev);
4334
4335 if (conn) {
4336 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4337
4338 /* Send to upper protocol */
4339 l2cap_recv_acldata(conn, skb, flags);
4340 return;
4341 } else {
4342 BT_ERR("%s ACL packet for unknown connection handle %d",
4343 hdev->name, handle);
4344 }
4345
4346 kfree_skb(skb);
4347 }
4348
4349 /* SCO data packet */
4350 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4351 {
4352 struct hci_sco_hdr *hdr = (void *) skb->data;
4353 struct hci_conn *conn;
4354 __u16 handle;
4355
4356 skb_pull(skb, HCI_SCO_HDR_SIZE);
4357
4358 handle = __le16_to_cpu(hdr->handle);
4359
4360 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4361
4362 hdev->stat.sco_rx++;
4363
4364 hci_dev_lock(hdev);
4365 conn = hci_conn_hash_lookup_handle(hdev, handle);
4366 hci_dev_unlock(hdev);
4367
4368 if (conn) {
4369 /* Send to upper protocol */
4370 sco_recv_scodata(conn, skb);
4371 return;
4372 } else {
4373 BT_ERR("%s SCO packet for unknown connection handle %d",
4374 hdev->name, handle);
4375 }
4376
4377 kfree_skb(skb);
4378 }
4379
4380 static bool hci_req_is_complete(struct hci_dev *hdev)
4381 {
4382 struct sk_buff *skb;
4383
4384 skb = skb_peek(&hdev->cmd_q);
4385 if (!skb)
4386 return true;
4387
4388 return bt_cb(skb)->hci.req_start;
4389 }
4390
4391 static void hci_resend_last(struct hci_dev *hdev)
4392 {
4393 struct hci_command_hdr *sent;
4394 struct sk_buff *skb;
4395 u16 opcode;
4396
4397 if (!hdev->sent_cmd)
4398 return;
4399
4400 sent = (void *) hdev->sent_cmd->data;
4401 opcode = __le16_to_cpu(sent->opcode);
4402 if (opcode == HCI_OP_RESET)
4403 return;
4404
4405 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4406 if (!skb)
4407 return;
4408
4409 skb_queue_head(&hdev->cmd_q, skb);
4410 queue_work(hdev->workqueue, &hdev->cmd_work);
4411 }
4412
4413 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4414 hci_req_complete_t *req_complete,
4415 hci_req_complete_skb_t *req_complete_skb)
4416 {
4417 struct sk_buff *skb;
4418 unsigned long flags;
4419
4420 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4421
4422 /* If the completed command doesn't match the last one that was
4423 * sent, we need to handle it specially.
4424 */
4425 if (!hci_sent_cmd_data(hdev, opcode)) {
4426 /* Some CSR based controllers generate a spontaneous
4427 * reset complete event during init and any pending
4428 * command will never be completed. In such a case we
4429 * need to resend whatever was the last sent
4430 * command.
4431 */
4432 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4433 hci_resend_last(hdev);
4434
4435 return;
4436 }
4437
4438 /* If the command succeeded and there are still more commands in
4439 * this request, the request is not yet complete.
4440 */
4441 if (!status && !hci_req_is_complete(hdev))
4442 return;
4443
4444 /* If this was the last command in a request, the complete
4445 * callback would be found in hdev->sent_cmd instead of the
4446 * command queue (hdev->cmd_q).
4447 */
4448 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4449 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4450 return;
4451 }
4452
4453 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4454 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4455 return;
4456 }
4457
4458 /* Remove all pending commands belonging to this request */
4459 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4460 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4461 if (bt_cb(skb)->hci.req_start) {
4462 __skb_queue_head(&hdev->cmd_q, skb);
4463 break;
4464 }
4465
4466 *req_complete = bt_cb(skb)->hci.req_complete;
4467 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4468 kfree_skb(skb);
4469 }
4470 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4471 }
4472
4473 static void hci_rx_work(struct work_struct *work)
4474 {
4475 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4476 struct sk_buff *skb;
4477
4478 BT_DBG("%s", hdev->name);
4479
4480 while ((skb = skb_dequeue(&hdev->rx_q))) {
4481 /* Send copy to monitor */
4482 hci_send_to_monitor(hdev, skb);
4483
4484 if (atomic_read(&hdev->promisc)) {
4485 /* Send copy to the sockets */
4486 hci_send_to_sock(hdev, skb);
4487 }
4488
4489 /* If the device has been opened in HCI_USER_CHANNEL,
4490 * userspace has exclusive access to the device.
4491 * While the device is in HCI_INIT, we still need to
4492 * process incoming packets so that the driver can
4493 * complete its setup().
4494 */
4495 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4496 !test_bit(HCI_INIT, &hdev->flags)) {
4497 kfree_skb(skb);
4498 continue;
4499 }
4500
4501 if (test_bit(HCI_INIT, &hdev->flags)) {
4502 /* Don't process data packets in these states. */
4503 switch (bt_cb(skb)->pkt_type) {
4504 case HCI_ACLDATA_PKT:
4505 case HCI_SCODATA_PKT:
4506 kfree_skb(skb);
4507 continue;
4508 }
4509 }
4510
4511 /* Process frame */
4512 switch (bt_cb(skb)->pkt_type) {
4513 case HCI_EVENT_PKT:
4514 BT_DBG("%s Event packet", hdev->name);
4515 hci_event_packet(hdev, skb);
4516 break;
4517
4518 case HCI_ACLDATA_PKT:
4519 BT_DBG("%s ACL data packet", hdev->name);
4520 hci_acldata_packet(hdev, skb);
4521 break;
4522
4523 case HCI_SCODATA_PKT:
4524 BT_DBG("%s SCO data packet", hdev->name);
4525 hci_scodata_packet(hdev, skb);
4526 break;
4527
4528 default:
4529 kfree_skb(skb);
4530 break;
4531 }
4532 }
4533 }
4534
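/* Command work: while the controller has command credits (cmd_cnt),
 * send the next queued command, keep a clone in hdev->sent_cmd for the
 * completion handlers and arm the command timeout (unless a reset is in
 * progress). If cloning fails, the command is re-queued and the work
 * rescheduled.
 */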
4535 static void hci_cmd_work(struct work_struct *work)
4536 {
4537 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4538 struct sk_buff *skb;
4539
4540 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4541 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4542
4543 /* Send queued commands */
4544 if (atomic_read(&hdev->cmd_cnt)) {
4545 skb = skb_dequeue(&hdev->cmd_q);
4546 if (!skb)
4547 return;
4548
4549 kfree_skb(hdev->sent_cmd);
4550
4551 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4552 if (hdev->sent_cmd) {
4553 atomic_dec(&hdev->cmd_cnt);
4554 hci_send_frame(hdev, skb);
4555 if (test_bit(HCI_RESET, &hdev->flags))
4556 cancel_delayed_work(&hdev->cmd_timer);
4557 else
4558 schedule_delayed_work(&hdev->cmd_timer,
4559 HCI_CMD_TIMEOUT);
4560 } else {
4561 skb_queue_head(&hdev->cmd_q, skb);
4562 queue_work(hdev->workqueue, &hdev->cmd_work);
4563 }
4564 }
4565 }
4566