1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 #include "leds.h"
44
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
48
49 /* HCI device list */
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
52
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
56
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
59
60 /* ---- HCI debugfs entries ---- */
61
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64 {
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72 }
73
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76 {
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 char buf[32];
80 size_t buf_size = min(count, (sizeof(buf)-1));
81 bool enable;
82
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94 return -EALREADY;
95
96 hci_req_sync_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_sync_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 kfree_skb(skb);
109
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112 return count;
113 }
114
115 static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120 };
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124 {
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136 {
137 struct hci_dev *hdev = file->private_data;
138 char buf[32];
139 size_t buf_size = min(count, (sizeof(buf)-1));
140 bool enable;
141 int err;
142
143 if (copy_from_user(buf, user_buf, buf_size))
144 return -EFAULT;
145
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
148 return -EINVAL;
149
150 /* When the diagnostic flags are not persistent and the transport
151 * is not active, then there is no need for the vendor callback.
152 *
153 * Instead just store the desired value. If needed the setting
154 * will be programmed when the controller gets powered on.
155 */
156 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
157 !test_bit(HCI_RUNNING, &hdev->flags))
158 goto done;
159
160 hci_req_sync_lock(hdev);
161 err = hdev->set_diag(hdev, enable);
162 hci_req_sync_unlock(hdev);
163
164 if (err < 0)
165 return err;
166
167 done:
168 if (enable)
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
170 else
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
172
173 return count;
174 }
175
176 static const struct file_operations vendor_diag_fops = {
177 .open = simple_open,
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
181 };
182
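/* The entries created below appear in the controller's debugfs directory,
 * typically /sys/kernel/debug/bluetooth/hciX/ (assuming the default debugfs
 * mount point). A usage sketch from userspace:
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */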
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
184 {
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186 &dut_mode_fops);
187
188 if (hdev->set_diag)
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190 &vendor_diag_fops);
191 }
192
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
194 {
195 BT_DBG("%s %ld", req->hdev->name, opt);
196
197 /* Reset device */
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
200 return 0;
201 }
202
203 static void bredr_init(struct hci_request *req)
204 {
205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
206
207 /* Read Local Supported Features */
208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210 /* Read Local Version */
211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213 /* Read BD Address */
214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
215 }
216
217 static void amp_init1(struct hci_request *req)
218 {
219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
220
221 /* Read Local Version */
222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
227 /* Read Local AMP Info */
228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
229
230 /* Read Data Blk size */
231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
232
233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
238 }
239
240 static int amp_init2(struct hci_request *req)
241 {
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
248
249 return 0;
250 }
251
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
253 {
254 struct hci_dev *hdev = req->hdev;
255
256 BT_DBG("%s %ld", hdev->name, opt);
257
258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260 hci_reset_req(req, 0);
261
262 switch (hdev->dev_type) {
263 case HCI_PRIMARY:
264 bredr_init(req);
265 break;
266 case HCI_AMP:
267 amp_init1(req);
268 break;
269 default:
270 BT_ERR("Unknown device type %d", hdev->dev_type);
271 break;
272 }
273
274 return 0;
275 }
276
277 static void bredr_setup(struct hci_request *req)
278 {
279 __le16 param;
280 __u8 flt_type;
281
282 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
284
285 /* Read Class of Device */
286 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
287
288 /* Read Local Name */
289 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
290
291 /* Read Voice Setting */
292 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
293
294 /* Read Number of Supported IAC */
295 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
296
297 /* Read Current IAC LAP */
298 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
299
300 /* Clear Event Filters */
301 flt_type = HCI_FLT_CLEAR_ALL;
302 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
303
304 	/* Connection accept timeout: 0x7d00 * 0.625 ms = ~20 secs */
305 param = cpu_to_le16(0x7d00);
306 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
307 }
308
309 static void le_setup(struct hci_request *req)
310 {
311 struct hci_dev *hdev = req->hdev;
312
313 /* Read LE Buffer Size */
314 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
315
316 /* Read LE Local Supported Features */
317 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
318
319 /* Read LE Supported States */
320 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
321
322 /* LE-only controllers have LE implicitly enabled */
323 if (!lmp_bredr_capable(hdev))
324 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
325 }
326
327 static void hci_setup_event_mask(struct hci_request *req)
328 {
329 struct hci_dev *hdev = req->hdev;
330
331 /* The second byte is 0xff instead of 0x9f (two reserved bits
332 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
333 * command otherwise.
334 */
335 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
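	/* events[] is the 8-octet payload of the Set Event Mask command:
	 * setting bit b in events[n] enables the event whose mask bit
	 * number is (8 * n + b) in the Bluetooth Core specification.
	 */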
336
337 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
338 * any event mask for pre 1.2 devices.
339 */
340 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
341 return;
342
343 if (lmp_bredr_capable(hdev)) {
344 events[4] |= 0x01; /* Flow Specification Complete */
345 } else {
346 /* Use a different default for LE-only devices */
347 memset(events, 0, sizeof(events));
348 events[1] |= 0x20; /* Command Complete */
349 events[1] |= 0x40; /* Command Status */
350 events[1] |= 0x80; /* Hardware Error */
351
352 /* If the controller supports the Disconnect command, enable
353 * the corresponding event. In addition enable packet flow
354 * control related events.
355 */
356 if (hdev->commands[0] & 0x20) {
357 events[0] |= 0x10; /* Disconnection Complete */
358 events[2] |= 0x04; /* Number of Completed Packets */
359 events[3] |= 0x02; /* Data Buffer Overflow */
360 }
361
362 /* If the controller supports the Read Remote Version
363 * Information command, enable the corresponding event.
364 */
365 if (hdev->commands[2] & 0x80)
366 events[1] |= 0x08; /* Read Remote Version Information
367 * Complete
368 */
369
370 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371 events[0] |= 0x80; /* Encryption Change */
372 events[5] |= 0x80; /* Encryption Key Refresh Complete */
373 }
374 }
375
376 if (lmp_inq_rssi_capable(hdev) ||
377 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
378 events[4] |= 0x02; /* Inquiry Result with RSSI */
379
380 if (lmp_ext_feat_capable(hdev))
381 events[4] |= 0x04; /* Read Remote Extended Features Complete */
382
383 if (lmp_esco_capable(hdev)) {
384 events[5] |= 0x08; /* Synchronous Connection Complete */
385 events[5] |= 0x10; /* Synchronous Connection Changed */
386 }
387
388 if (lmp_sniffsubr_capable(hdev))
389 events[5] |= 0x20; /* Sniff Subrating */
390
391 if (lmp_pause_enc_capable(hdev))
392 events[5] |= 0x80; /* Encryption Key Refresh Complete */
393
394 if (lmp_ext_inq_capable(hdev))
395 events[5] |= 0x40; /* Extended Inquiry Result */
396
397 if (lmp_no_flush_capable(hdev))
398 events[7] |= 0x01; /* Enhanced Flush Complete */
399
400 if (lmp_lsto_capable(hdev))
401 events[6] |= 0x80; /* Link Supervision Timeout Changed */
402
403 if (lmp_ssp_capable(hdev)) {
404 events[6] |= 0x01; /* IO Capability Request */
405 events[6] |= 0x02; /* IO Capability Response */
406 events[6] |= 0x04; /* User Confirmation Request */
407 events[6] |= 0x08; /* User Passkey Request */
408 events[6] |= 0x10; /* Remote OOB Data Request */
409 events[6] |= 0x20; /* Simple Pairing Complete */
410 events[7] |= 0x04; /* User Passkey Notification */
411 events[7] |= 0x08; /* Keypress Notification */
412 events[7] |= 0x10; /* Remote Host Supported
413 * Features Notification
414 */
415 }
416
417 if (lmp_le_capable(hdev))
418 events[7] |= 0x20; /* LE Meta-Event */
419
420 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
421 }
422
423 static int hci_init2_req(struct hci_request *req, unsigned long opt)
424 {
425 struct hci_dev *hdev = req->hdev;
426
427 if (hdev->dev_type == HCI_AMP)
428 return amp_init2(req);
429
430 if (lmp_bredr_capable(hdev))
431 bredr_setup(req);
432 else
433 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
434
435 if (lmp_le_capable(hdev))
436 le_setup(req);
437
438 /* All Bluetooth 1.2 and later controllers should support the
439 * HCI command for reading the local supported commands.
440 *
441 * Unfortunately some controllers indicate Bluetooth 1.2 support,
442 * but do not have support for this command. If that is the case,
443 * the driver can quirk the behavior and skip reading the local
444 * supported commands.
445 */
446 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
448 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
449
450 if (lmp_ssp_capable(hdev)) {
451 /* When SSP is available, then the host features page
452 * should also be available as well. However some
453 * controllers list the max_page as 0 as long as SSP
454 * has not been enabled. To achieve proper debugging
455 * output, force the minimum max_page to 1 at least.
456 */
457 hdev->max_page = 0x01;
458
459 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
460 u8 mode = 0x01;
461
462 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463 sizeof(mode), &mode);
464 } else {
465 struct hci_cp_write_eir cp;
466
467 memset(hdev->eir, 0, sizeof(hdev->eir));
468 memset(&cp, 0, sizeof(cp));
469
470 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
471 }
472 }
473
474 if (lmp_inq_rssi_capable(hdev) ||
475 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
476 u8 mode;
477
478 /* If Extended Inquiry Result events are supported, then
479 * they are clearly preferred over Inquiry Result with RSSI
480 * events.
481 */
482 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
483
484 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
485 }
486
487 if (lmp_inq_tx_pwr_capable(hdev))
488 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
489
490 if (lmp_ext_feat_capable(hdev)) {
491 struct hci_cp_read_local_ext_features cp;
492
493 cp.page = 0x01;
494 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
495 sizeof(cp), &cp);
496 }
497
498 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
499 u8 enable = 1;
500 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
501 &enable);
502 }
503
504 return 0;
505 }
506
507 static void hci_setup_link_policy(struct hci_request *req)
508 {
509 struct hci_dev *hdev = req->hdev;
510 struct hci_cp_write_def_link_policy cp;
511 u16 link_policy = 0;
512
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
521
522 cp.policy = cpu_to_le16(link_policy);
523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
524 }
525
526 static void hci_set_le_support(struct hci_request *req)
527 {
528 struct hci_dev *hdev = req->hdev;
529 struct hci_cp_write_le_host_supported cp;
530
531 /* LE-only devices do not support explicit enablement */
532 if (!lmp_bredr_capable(hdev))
533 return;
534
535 memset(&cp, 0, sizeof(cp));
536
537 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
538 cp.le = 0x01;
539 cp.simul = 0x00;
540 }
541
542 if (cp.le != lmp_host_le_capable(hdev))
543 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
544 &cp);
545 }
546
547 static void hci_set_event_mask_page_2(struct hci_request *req)
548 {
549 struct hci_dev *hdev = req->hdev;
550 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
551 bool changed = false;
552
553 /* If Connectionless Slave Broadcast master role is supported
554 * enable all necessary events for it.
555 */
556 if (lmp_csb_master_capable(hdev)) {
557 events[1] |= 0x40; /* Triggered Clock Capture */
558 events[1] |= 0x80; /* Synchronization Train Complete */
559 events[2] |= 0x10; /* Slave Page Response Timeout */
560 events[2] |= 0x20; /* CSB Channel Map Change */
561 changed = true;
562 }
563
564 /* If Connectionless Slave Broadcast slave role is supported
565 * enable all necessary events for it.
566 */
567 if (lmp_csb_slave_capable(hdev)) {
568 events[2] |= 0x01; /* Synchronization Train Received */
569 events[2] |= 0x02; /* CSB Receive */
570 events[2] |= 0x04; /* CSB Timeout */
571 events[2] |= 0x08; /* Truncated Page Complete */
572 changed = true;
573 }
574
575 /* Enable Authenticated Payload Timeout Expired event if supported */
576 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
577 events[2] |= 0x80;
578 changed = true;
579 }
580
581 /* Some Broadcom based controllers indicate support for Set Event
582 * Mask Page 2 command, but then actually do not support it. Since
583 * the default value is all bits set to zero, the command is only
584 * required if the event mask has to be changed. In case no change
585 * to the event mask is needed, skip this command.
586 */
587 if (changed)
588 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
589 sizeof(events), events);
590 }
591
592 static int hci_init3_req(struct hci_request *req, unsigned long opt)
593 {
594 struct hci_dev *hdev = req->hdev;
595 u8 p;
596
597 hci_setup_event_mask(req);
598
599 if (hdev->commands[6] & 0x20 &&
600 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
601 struct hci_cp_read_stored_link_key cp;
602
603 bacpy(&cp.bdaddr, BDADDR_ANY);
604 cp.read_all = 0x01;
605 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
606 }
607
608 if (hdev->commands[5] & 0x10)
609 hci_setup_link_policy(req);
610
611 if (hdev->commands[8] & 0x01)
612 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
613
614 /* Some older Broadcom based Bluetooth 1.2 controllers do not
615 * support the Read Page Scan Type command. Check support for
616 * this command in the bit mask of supported commands.
617 */
618 if (hdev->commands[13] & 0x01)
619 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620
621 if (lmp_le_capable(hdev)) {
622 u8 events[8];
623
624 memset(events, 0, sizeof(events));
625
626 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
627 events[0] |= 0x10; /* LE Long Term Key Request */
628
629 /* If controller supports the Connection Parameters Request
630 * Link Layer Procedure, enable the corresponding event.
631 */
632 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
633 events[0] |= 0x20; /* LE Remote Connection
634 * Parameter Request
635 */
636
637 /* If the controller supports the Data Length Extension
638 * feature, enable the corresponding event.
639 */
640 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
641 events[0] |= 0x40; /* LE Data Length Change */
642
643 /* If the controller supports Extended Scanner Filter
644 		 * Policies, enable the corresponding event.
645 */
646 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
647 events[1] |= 0x04; /* LE Direct Advertising
648 * Report
649 */
650
651 /* If the controller supports the LE Set Scan Enable command,
652 * enable the corresponding advertising report event.
653 */
654 if (hdev->commands[26] & 0x08)
655 events[0] |= 0x02; /* LE Advertising Report */
656
657 /* If the controller supports the LE Create Connection
658 * command, enable the corresponding event.
659 */
660 if (hdev->commands[26] & 0x10)
661 events[0] |= 0x01; /* LE Connection Complete */
662
663 /* If the controller supports the LE Connection Update
664 * command, enable the corresponding event.
665 */
666 if (hdev->commands[27] & 0x04)
667 events[0] |= 0x04; /* LE Connection Update
668 * Complete
669 */
670
671 /* If the controller supports the LE Read Remote Used Features
672 * command, enable the corresponding event.
673 */
674 if (hdev->commands[27] & 0x20)
675 events[0] |= 0x08; /* LE Read Remote Used
676 * Features Complete
677 */
678
679 /* If the controller supports the LE Read Local P-256
680 * Public Key command, enable the corresponding event.
681 */
682 if (hdev->commands[34] & 0x02)
683 events[0] |= 0x80; /* LE Read Local P-256
684 * Public Key Complete
685 */
686
687 /* If the controller supports the LE Generate DHKey
688 * command, enable the corresponding event.
689 */
690 if (hdev->commands[34] & 0x04)
691 events[1] |= 0x01; /* LE Generate DHKey Complete */
692
693 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
694 events);
695
696 if (hdev->commands[25] & 0x40) {
697 /* Read LE Advertising Channel TX Power */
698 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
699 }
700
701 if (hdev->commands[26] & 0x40) {
702 /* Read LE White List Size */
703 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
704 0, NULL);
705 }
706
707 if (hdev->commands[26] & 0x80) {
708 /* Clear LE White List */
709 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
710 }
711
712 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
713 /* Read LE Maximum Data Length */
714 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
715
716 /* Read LE Suggested Default Data Length */
717 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
718 }
719
720 hci_set_le_support(req);
721 }
722
723 /* Read features beyond page 1 if available */
724 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
725 struct hci_cp_read_local_ext_features cp;
726
727 cp.page = p;
728 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
729 sizeof(cp), &cp);
730 }
731
732 return 0;
733 }
734
735 static int hci_init4_req(struct hci_request *req, unsigned long opt)
736 {
737 struct hci_dev *hdev = req->hdev;
738
739 /* Some Broadcom based Bluetooth controllers do not support the
740 * Delete Stored Link Key command. They are clearly indicating its
741 * absence in the bit mask of supported commands.
742 *
743 	 * Check the supported commands and only if the command is marked
744 * as supported send it. If not supported assume that the controller
745 * does not have actual support for stored link keys which makes this
746 * command redundant anyway.
747 *
748 * Some controllers indicate that they support handling deleting
749 * stored link keys, but they don't. The quirk lets a driver
750 * just disable this command.
751 */
752 if (hdev->commands[6] & 0x80 &&
753 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
754 struct hci_cp_delete_stored_link_key cp;
755
756 bacpy(&cp.bdaddr, BDADDR_ANY);
757 cp.delete_all = 0x01;
758 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
759 sizeof(cp), &cp);
760 }
761
762 /* Set event mask page 2 if the HCI command for it is supported */
763 if (hdev->commands[22] & 0x04)
764 hci_set_event_mask_page_2(req);
765
766 /* Read local codec list if the HCI command is supported */
767 if (hdev->commands[29] & 0x20)
768 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
769
770 /* Get MWS transport configuration if the HCI command is supported */
771 if (hdev->commands[30] & 0x08)
772 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
773
774 /* Check for Synchronization Train support */
775 if (lmp_sync_train_capable(hdev))
776 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
777
778 /* Enable Secure Connections if supported and configured */
779 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
780 bredr_sc_enabled(hdev)) {
781 u8 support = 0x01;
782
783 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
784 sizeof(support), &support);
785 }
786
787 return 0;
788 }
789
790 static int __hci_init(struct hci_dev *hdev)
791 {
792 int err;
793
794 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
795 if (err < 0)
796 return err;
797
798 if (hci_dev_test_flag(hdev, HCI_SETUP))
799 hci_debugfs_create_basic(hdev);
800
801 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
802 if (err < 0)
803 return err;
804
805 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
806 * BR/EDR/LE type controllers. AMP controllers only need the
807 * first two stages of init.
808 */
809 if (hdev->dev_type != HCI_PRIMARY)
810 return 0;
811
812 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
813 if (err < 0)
814 return err;
815
816 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
817 if (err < 0)
818 return err;
819
820 /* This function is only called when the controller is actually in
821 * configured state. When the controller is marked as unconfigured,
822 * this initialization procedure is not run.
823 *
824 * It means that it is possible that a controller runs through its
825 * setup phase and then discovers missing settings. If that is the
826 * case, then this function will not be called. It then will only
827 * be called during the config phase.
828 *
829 * So only when in setup phase or config phase, create the debugfs
830 * entries and register the SMP channels.
831 */
832 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
833 !hci_dev_test_flag(hdev, HCI_CONFIG))
834 return 0;
835
836 hci_debugfs_create_common(hdev);
837
838 if (lmp_bredr_capable(hdev))
839 hci_debugfs_create_bredr(hdev);
840
841 if (lmp_le_capable(hdev))
842 hci_debugfs_create_le(hdev);
843
844 return 0;
845 }
846
847 static int hci_init0_req(struct hci_request *req, unsigned long opt)
848 {
849 struct hci_dev *hdev = req->hdev;
850
851 BT_DBG("%s %ld", hdev->name, opt);
852
853 /* Reset */
854 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
855 hci_reset_req(req, 0);
856
857 /* Read Local Version */
858 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
859
860 /* Read BD Address */
861 if (hdev->set_bdaddr)
862 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
863
864 return 0;
865 }
866
867 static int __hci_unconf_init(struct hci_dev *hdev)
868 {
869 int err;
870
871 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
872 return 0;
873
874 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
875 if (err < 0)
876 return err;
877
878 if (hci_dev_test_flag(hdev, HCI_SETUP))
879 hci_debugfs_create_basic(hdev);
880
881 return 0;
882 }
883
884 static int hci_scan_req(struct hci_request *req, unsigned long opt)
885 {
886 __u8 scan = opt;
887
888 BT_DBG("%s %x", req->hdev->name, scan);
889
890 /* Inquiry and Page scans */
891 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
892 return 0;
893 }
894
895 static int hci_auth_req(struct hci_request *req, unsigned long opt)
896 {
897 __u8 auth = opt;
898
899 BT_DBG("%s %x", req->hdev->name, auth);
900
901 /* Authentication */
902 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
903 return 0;
904 }
905
906 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
907 {
908 __u8 encrypt = opt;
909
910 BT_DBG("%s %x", req->hdev->name, encrypt);
911
912 /* Encryption */
913 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
914 return 0;
915 }
916
917 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
918 {
919 __le16 policy = cpu_to_le16(opt);
920
921 BT_DBG("%s %x", req->hdev->name, policy);
922
923 /* Default link policy */
924 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
925 return 0;
926 }
927
928 /* Get HCI device by index.
929 * Device is held on return. */
930 struct hci_dev *hci_dev_get(int index)
931 {
932 struct hci_dev *hdev = NULL, *d;
933
934 BT_DBG("%d", index);
935
936 if (index < 0)
937 return NULL;
938
939 read_lock(&hci_dev_list_lock);
940 list_for_each_entry(d, &hci_dev_list, list) {
941 if (d->id == index) {
942 hdev = hci_dev_hold(d);
943 break;
944 }
945 }
946 read_unlock(&hci_dev_list_lock);
947 return hdev;
948 }
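/* A minimal usage sketch (hypothetical caller): every reference obtained
 * with hci_dev_get() must eventually be dropped with hci_dev_put().
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */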
949
950 /* ---- Inquiry support ---- */
951
952 bool hci_discovery_active(struct hci_dev *hdev)
953 {
954 struct discovery_state *discov = &hdev->discovery;
955
956 switch (discov->state) {
957 case DISCOVERY_FINDING:
958 case DISCOVERY_RESOLVING:
959 return true;
960
961 default:
962 return false;
963 }
964 }
965
966 void hci_discovery_set_state(struct hci_dev *hdev, int state)
967 {
968 int old_state = hdev->discovery.state;
969
970 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
971
972 if (old_state == state)
973 return;
974
975 hdev->discovery.state = state;
976
977 switch (state) {
978 case DISCOVERY_STOPPED:
979 hci_update_background_scan(hdev);
980
981 if (old_state != DISCOVERY_STARTING)
982 mgmt_discovering(hdev, 0);
983 break;
984 case DISCOVERY_STARTING:
985 break;
986 case DISCOVERY_FINDING:
987 mgmt_discovering(hdev, 1);
988 break;
989 case DISCOVERY_RESOLVING:
990 break;
991 case DISCOVERY_STOPPING:
992 break;
993 }
994 }
995
996 void hci_inquiry_cache_flush(struct hci_dev *hdev)
997 {
998 struct discovery_state *cache = &hdev->discovery;
999 struct inquiry_entry *p, *n;
1000
1001 list_for_each_entry_safe(p, n, &cache->all, all) {
1002 list_del(&p->all);
1003 kfree(p);
1004 }
1005
1006 INIT_LIST_HEAD(&cache->unknown);
1007 INIT_LIST_HEAD(&cache->resolve);
1008 }
1009
1010 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1011 bdaddr_t *bdaddr)
1012 {
1013 struct discovery_state *cache = &hdev->discovery;
1014 struct inquiry_entry *e;
1015
1016 BT_DBG("cache %p, %pMR", cache, bdaddr);
1017
1018 list_for_each_entry(e, &cache->all, all) {
1019 if (!bacmp(&e->data.bdaddr, bdaddr))
1020 return e;
1021 }
1022
1023 return NULL;
1024 }
1025
1026 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1027 bdaddr_t *bdaddr)
1028 {
1029 struct discovery_state *cache = &hdev->discovery;
1030 struct inquiry_entry *e;
1031
1032 BT_DBG("cache %p, %pMR", cache, bdaddr);
1033
1034 list_for_each_entry(e, &cache->unknown, list) {
1035 if (!bacmp(&e->data.bdaddr, bdaddr))
1036 return e;
1037 }
1038
1039 return NULL;
1040 }
1041
1042 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1043 bdaddr_t *bdaddr,
1044 int state)
1045 {
1046 struct discovery_state *cache = &hdev->discovery;
1047 struct inquiry_entry *e;
1048
1049 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1050
1051 list_for_each_entry(e, &cache->resolve, list) {
1052 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1053 return e;
1054 if (!bacmp(&e->data.bdaddr, bdaddr))
1055 return e;
1056 }
1057
1058 return NULL;
1059 }
1060
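/* Re-insert @ie into the resolve list so the list stays ordered by
 * ascending |RSSI| (strongest device first); entries that already have a
 * name request pending are skipped over and keep their place.
 */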
1061 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1062 struct inquiry_entry *ie)
1063 {
1064 struct discovery_state *cache = &hdev->discovery;
1065 struct list_head *pos = &cache->resolve;
1066 struct inquiry_entry *p;
1067
1068 list_del(&ie->list);
1069
1070 list_for_each_entry(p, &cache->resolve, list) {
1071 if (p->name_state != NAME_PENDING &&
1072 abs(p->data.rssi) >= abs(ie->data.rssi))
1073 break;
1074 pos = &p->list;
1075 }
1076
1077 list_add(&ie->list, pos);
1078 }
1079
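/* Add or refresh the inquiry cache entry for @data and return the MGMT
 * device-found flags for it, e.g. MGMT_DEV_FOUND_CONFIRM_NAME when the
 * remote name is not yet known.
 */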
1080 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1081 bool name_known)
1082 {
1083 struct discovery_state *cache = &hdev->discovery;
1084 struct inquiry_entry *ie;
1085 u32 flags = 0;
1086
1087 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1088
1089 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1090
1091 if (!data->ssp_mode)
1092 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1093
1094 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1095 if (ie) {
1096 if (!ie->data.ssp_mode)
1097 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1098
1099 if (ie->name_state == NAME_NEEDED &&
1100 data->rssi != ie->data.rssi) {
1101 ie->data.rssi = data->rssi;
1102 hci_inquiry_cache_update_resolve(hdev, ie);
1103 }
1104
1105 goto update;
1106 }
1107
1108 /* Entry not in the cache. Add new one. */
1109 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1110 if (!ie) {
1111 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1112 goto done;
1113 }
1114
1115 list_add(&ie->all, &cache->all);
1116
1117 if (name_known) {
1118 ie->name_state = NAME_KNOWN;
1119 } else {
1120 ie->name_state = NAME_NOT_KNOWN;
1121 list_add(&ie->list, &cache->unknown);
1122 }
1123
1124 update:
1125 if (name_known && ie->name_state != NAME_KNOWN &&
1126 ie->name_state != NAME_PENDING) {
1127 ie->name_state = NAME_KNOWN;
1128 list_del(&ie->list);
1129 }
1130
1131 memcpy(&ie->data, data, sizeof(*data));
1132 ie->timestamp = jiffies;
1133 cache->timestamp = jiffies;
1134
1135 if (ie->name_state == NAME_NOT_KNOWN)
1136 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1137
1138 done:
1139 return flags;
1140 }
1141
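/* Copy at most @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info and return the number of entries copied.
 */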
1142 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1143 {
1144 struct discovery_state *cache = &hdev->discovery;
1145 struct inquiry_info *info = (struct inquiry_info *) buf;
1146 struct inquiry_entry *e;
1147 int copied = 0;
1148
1149 list_for_each_entry(e, &cache->all, all) {
1150 struct inquiry_data *data = &e->data;
1151
1152 if (copied >= num)
1153 break;
1154
1155 bacpy(&info->bdaddr, &data->bdaddr);
1156 info->pscan_rep_mode = data->pscan_rep_mode;
1157 info->pscan_period_mode = data->pscan_period_mode;
1158 info->pscan_mode = data->pscan_mode;
1159 memcpy(info->dev_class, data->dev_class, 3);
1160 info->clock_offset = data->clock_offset;
1161
1162 info++;
1163 copied++;
1164 }
1165
1166 BT_DBG("cache %p, copied %d", cache, copied);
1167 return copied;
1168 }
1169
1170 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1171 {
1172 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1173 struct hci_dev *hdev = req->hdev;
1174 struct hci_cp_inquiry cp;
1175
1176 BT_DBG("%s", hdev->name);
1177
1178 if (test_bit(HCI_INQUIRY, &hdev->flags))
1179 return 0;
1180
1181 /* Start Inquiry */
1182 memcpy(&cp.lap, &ir->lap, 3);
1183 cp.length = ir->length;
1184 cp.num_rsp = ir->num_rsp;
1185 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1186
1187 return 0;
1188 }
1189
1190 int hci_inquiry(void __user *arg)
1191 {
1192 __u8 __user *ptr = arg;
1193 struct hci_inquiry_req ir;
1194 struct hci_dev *hdev;
1195 int err = 0, do_inquiry = 0, max_rsp;
1196 long timeo;
1197 __u8 *buf;
1198
1199 if (copy_from_user(&ir, ptr, sizeof(ir)))
1200 return -EFAULT;
1201
1202 hdev = hci_dev_get(ir.dev_id);
1203 if (!hdev)
1204 return -ENODEV;
1205
1206 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1207 err = -EBUSY;
1208 goto done;
1209 }
1210
1211 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1212 err = -EOPNOTSUPP;
1213 goto done;
1214 }
1215
1216 if (hdev->dev_type != HCI_PRIMARY) {
1217 err = -EOPNOTSUPP;
1218 goto done;
1219 }
1220
1221 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1222 err = -EOPNOTSUPP;
1223 goto done;
1224 }
1225
1226 hci_dev_lock(hdev);
1227 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1228 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1229 hci_inquiry_cache_flush(hdev);
1230 do_inquiry = 1;
1231 }
1232 hci_dev_unlock(hdev);
1233
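	/* ir.length is the HCI Inquiry_Length parameter, expressed in units
	 * of 1.28 seconds; budget roughly 2 seconds per unit here so the
	 * synchronous request does not expire before the inquiry completes.
	 */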
1234 timeo = ir.length * msecs_to_jiffies(2000);
1235
1236 if (do_inquiry) {
1237 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1238 timeo, NULL);
1239 if (err < 0)
1240 goto done;
1241
1242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1243 * cleared). If it is interrupted by a signal, return -EINTR.
1244 */
1245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1246 TASK_INTERRUPTIBLE))
1247 return -EINTR;
1248 }
1249
1250 	/* For an unlimited number of responses we will use a buffer with
1251 	 * 255 entries
1252 */
1253 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1254
1255 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1256 	 * and then copy it to user space.
1257 */
1258 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1259 if (!buf) {
1260 err = -ENOMEM;
1261 goto done;
1262 }
1263
1264 hci_dev_lock(hdev);
1265 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1266 hci_dev_unlock(hdev);
1267
1268 BT_DBG("num_rsp %d", ir.num_rsp);
1269
1270 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1271 ptr += sizeof(ir);
1272 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1273 ir.num_rsp))
1274 err = -EFAULT;
1275 } else
1276 err = -EFAULT;
1277
1278 kfree(buf);
1279
1280 done:
1281 hci_dev_put(hdev);
1282 return err;
1283 }
1284
1285 static int hci_dev_do_open(struct hci_dev *hdev)
1286 {
1287 int ret = 0;
1288
1289 BT_DBG("%s %p", hdev->name, hdev);
1290
1291 hci_req_sync_lock(hdev);
1292
1293 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1294 ret = -ENODEV;
1295 goto done;
1296 }
1297
1298 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1299 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1300 /* Check for rfkill but allow the HCI setup stage to
1301 * proceed (which in itself doesn't cause any RF activity).
1302 */
1303 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1304 ret = -ERFKILL;
1305 goto done;
1306 }
1307
1308 /* Check for valid public address or a configured static
1309 		 * random address, but let the HCI setup proceed to
1310 * be able to determine if there is a public address
1311 * or not.
1312 *
1313 * In case of user channel usage, it is not important
1314 * if a public address or static random address is
1315 * available.
1316 *
1317 * This check is only valid for BR/EDR controllers
1318 * since AMP controllers do not have an address.
1319 */
1320 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1321 hdev->dev_type == HCI_PRIMARY &&
1322 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1323 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1324 ret = -EADDRNOTAVAIL;
1325 goto done;
1326 }
1327 }
1328
1329 if (test_bit(HCI_UP, &hdev->flags)) {
1330 ret = -EALREADY;
1331 goto done;
1332 }
1333
1334 if (hdev->open(hdev)) {
1335 ret = -EIO;
1336 goto done;
1337 }
1338
1339 set_bit(HCI_RUNNING, &hdev->flags);
1340 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1341
1342 atomic_set(&hdev->cmd_cnt, 1);
1343 set_bit(HCI_INIT, &hdev->flags);
1344
1345 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1346 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1347
1348 if (hdev->setup)
1349 ret = hdev->setup(hdev);
1350
1351 /* The transport driver can set these quirks before
1352 * creating the HCI device or in its setup callback.
1353 *
1354 * In case any of them is set, the controller has to
1355 * start up as unconfigured.
1356 */
1357 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1358 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1359 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1360
1361 /* For an unconfigured controller it is required to
1362 * read at least the version information provided by
1363 * the Read Local Version Information command.
1364 *
1365 * If the set_bdaddr driver callback is provided, then
1366 * also the original Bluetooth public device address
1367 * will be read using the Read BD Address command.
1368 */
1369 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1370 ret = __hci_unconf_init(hdev);
1371 }
1372
1373 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1374 /* If public address change is configured, ensure that
1375 * the address gets programmed. If the driver does not
1376 * support changing the public address, fail the power
1377 * on procedure.
1378 */
1379 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1380 hdev->set_bdaddr)
1381 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1382 else
1383 ret = -EADDRNOTAVAIL;
1384 }
1385
1386 if (!ret) {
1387 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1388 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1389 ret = __hci_init(hdev);
1390 if (!ret && hdev->post_init)
1391 ret = hdev->post_init(hdev);
1392 }
1393 }
1394
1395 /* If the HCI Reset command is clearing all diagnostic settings,
1396 * then they need to be reprogrammed after the init procedure
1397 * completed.
1398 */
1399 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1400 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1401 ret = hdev->set_diag(hdev, true);
1402
1403 clear_bit(HCI_INIT, &hdev->flags);
1404
1405 if (!ret) {
1406 hci_dev_hold(hdev);
1407 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1408 set_bit(HCI_UP, &hdev->flags);
1409 hci_sock_dev_event(hdev, HCI_DEV_UP);
1410 hci_leds_update_powered(hdev, true);
1411 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1412 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1413 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1414 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1415 hci_dev_test_flag(hdev, HCI_MGMT) &&
1416 hdev->dev_type == HCI_PRIMARY) {
1417 ret = __hci_req_hci_power_on(hdev);
1418 mgmt_power_on(hdev, ret);
1419 }
1420 } else {
1421 /* Init failed, cleanup */
1422 flush_work(&hdev->tx_work);
1423 flush_work(&hdev->cmd_work);
1424 flush_work(&hdev->rx_work);
1425
1426 skb_queue_purge(&hdev->cmd_q);
1427 skb_queue_purge(&hdev->rx_q);
1428
1429 if (hdev->flush)
1430 hdev->flush(hdev);
1431
1432 if (hdev->sent_cmd) {
1433 kfree_skb(hdev->sent_cmd);
1434 hdev->sent_cmd = NULL;
1435 }
1436
1437 clear_bit(HCI_RUNNING, &hdev->flags);
1438 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1439
1440 hdev->close(hdev);
1441 hdev->flags &= BIT(HCI_RAW);
1442 }
1443
1444 done:
1445 hci_req_sync_unlock(hdev);
1446 return ret;
1447 }
1448
1449 /* ---- HCI ioctl helpers ---- */
1450
1451 int hci_dev_open(__u16 dev)
1452 {
1453 struct hci_dev *hdev;
1454 int err;
1455
1456 hdev = hci_dev_get(dev);
1457 if (!hdev)
1458 return -ENODEV;
1459
1460 /* Devices that are marked as unconfigured can only be powered
1461 * up as user channel. Trying to bring them up as normal devices
1462 	 * will result in a failure. Only user channel operation is
1463 * possible.
1464 *
1465 * When this function is called for a user channel, the flag
1466 * HCI_USER_CHANNEL will be set first before attempting to
1467 * open the device.
1468 */
1469 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1470 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1471 err = -EOPNOTSUPP;
1472 goto done;
1473 }
1474
1475 /* We need to ensure that no other power on/off work is pending
1476 * before proceeding to call hci_dev_do_open. This is
1477 * particularly important if the setup procedure has not yet
1478 * completed.
1479 */
1480 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1481 cancel_delayed_work(&hdev->power_off);
1482
1483 /* After this call it is guaranteed that the setup procedure
1484 * has finished. This means that error conditions like RFKILL
1485 * or no valid public or static random address apply.
1486 */
1487 flush_workqueue(hdev->req_workqueue);
1488
1489 /* For controllers not using the management interface and that
1490 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1491 * so that pairing works for them. Once the management interface
1492 * is in use this bit will be cleared again and userspace has
1493 * to explicitly enable it.
1494 */
1495 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1496 !hci_dev_test_flag(hdev, HCI_MGMT))
1497 hci_dev_set_flag(hdev, HCI_BONDABLE);
1498
1499 err = hci_dev_do_open(hdev);
1500
1501 done:
1502 hci_dev_put(hdev);
1503 return err;
1504 }
1505
1506 /* This function requires the caller holds hdev->lock */
1507 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1508 {
1509 struct hci_conn_params *p;
1510
1511 list_for_each_entry(p, &hdev->le_conn_params, list) {
1512 if (p->conn) {
1513 hci_conn_drop(p->conn);
1514 hci_conn_put(p->conn);
1515 p->conn = NULL;
1516 }
1517 list_del_init(&p->action);
1518 }
1519
1520 BT_DBG("All LE pending actions cleared");
1521 }
1522
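/* Power down the controller: flush pending work, notify the management
 * interface, flush the inquiry cache and connection hash, optionally send
 * HCI_Reset, and finally call the driver's close() with all queues empty.
 */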
1523 int hci_dev_do_close(struct hci_dev *hdev)
1524 {
1525 bool auto_off;
1526
1527 BT_DBG("%s %p", hdev->name, hdev);
1528
1529 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1530 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1531 test_bit(HCI_UP, &hdev->flags)) {
1532 /* Execute vendor specific shutdown routine */
1533 if (hdev->shutdown)
1534 hdev->shutdown(hdev);
1535 }
1536
1537 cancel_delayed_work(&hdev->power_off);
1538
1539 hci_request_cancel_all(hdev);
1540 hci_req_sync_lock(hdev);
1541
1542 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1543 cancel_delayed_work_sync(&hdev->cmd_timer);
1544 hci_req_sync_unlock(hdev);
1545 return 0;
1546 }
1547
1548 hci_leds_update_powered(hdev, false);
1549
1550 /* Flush RX and TX works */
1551 flush_work(&hdev->tx_work);
1552 flush_work(&hdev->rx_work);
1553
1554 if (hdev->discov_timeout > 0) {
1555 hdev->discov_timeout = 0;
1556 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1557 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1558 }
1559
1560 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1561 cancel_delayed_work(&hdev->service_cache);
1562
1563 if (hci_dev_test_flag(hdev, HCI_MGMT))
1564 cancel_delayed_work_sync(&hdev->rpa_expired);
1565
1566 /* Avoid potential lockdep warnings from the *_flush() calls by
1567 * ensuring the workqueue is empty up front.
1568 */
1569 drain_workqueue(hdev->workqueue);
1570
1571 hci_dev_lock(hdev);
1572
1573 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1574
1575 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1576
1577 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1578 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1579 hci_dev_test_flag(hdev, HCI_MGMT))
1580 __mgmt_power_off(hdev);
1581
1582 hci_inquiry_cache_flush(hdev);
1583 hci_pend_le_actions_clear(hdev);
1584 hci_conn_hash_flush(hdev);
1585 hci_dev_unlock(hdev);
1586
1587 smp_unregister(hdev);
1588
1589 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1590
1591 if (hdev->flush)
1592 hdev->flush(hdev);
1593
1594 /* Reset device */
1595 skb_queue_purge(&hdev->cmd_q);
1596 atomic_set(&hdev->cmd_cnt, 1);
1597 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1598 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1599 set_bit(HCI_INIT, &hdev->flags);
1600 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1601 clear_bit(HCI_INIT, &hdev->flags);
1602 }
1603
1604 /* flush cmd work */
1605 flush_work(&hdev->cmd_work);
1606
1607 /* Drop queues */
1608 skb_queue_purge(&hdev->rx_q);
1609 skb_queue_purge(&hdev->cmd_q);
1610 skb_queue_purge(&hdev->raw_q);
1611
1612 /* Drop last sent command */
1613 if (hdev->sent_cmd) {
1614 cancel_delayed_work_sync(&hdev->cmd_timer);
1615 kfree_skb(hdev->sent_cmd);
1616 hdev->sent_cmd = NULL;
1617 }
1618
1619 clear_bit(HCI_RUNNING, &hdev->flags);
1620 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1621
1622 /* After this point our queues are empty
1623 * and no tasks are scheduled. */
1624 hdev->close(hdev);
1625
1626 /* Clear flags */
1627 hdev->flags &= BIT(HCI_RAW);
1628 hci_dev_clear_volatile_flags(hdev);
1629
1630 /* Controller radio is available but is currently powered down */
1631 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1632
1633 memset(hdev->eir, 0, sizeof(hdev->eir));
1634 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1635 bacpy(&hdev->random_addr, BDADDR_ANY);
1636
1637 hci_req_sync_unlock(hdev);
1638
1639 hci_dev_put(hdev);
1640 return 0;
1641 }
1642
1643 int hci_dev_close(__u16 dev)
1644 {
1645 struct hci_dev *hdev;
1646 int err;
1647
1648 hdev = hci_dev_get(dev);
1649 if (!hdev)
1650 return -ENODEV;
1651
1652 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1653 err = -EBUSY;
1654 goto done;
1655 }
1656
1657 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1658 cancel_delayed_work(&hdev->power_off);
1659
1660 err = hci_dev_do_close(hdev);
1661
1662 done:
1663 hci_dev_put(hdev);
1664 return err;
1665 }
1666
1667 static int hci_dev_do_reset(struct hci_dev *hdev)
1668 {
1669 int ret;
1670
1671 BT_DBG("%s %p", hdev->name, hdev);
1672
1673 hci_req_sync_lock(hdev);
1674
1675 /* Drop queues */
1676 skb_queue_purge(&hdev->rx_q);
1677 skb_queue_purge(&hdev->cmd_q);
1678
1679 /* Avoid potential lockdep warnings from the *_flush() calls by
1680 * ensuring the workqueue is empty up front.
1681 */
1682 drain_workqueue(hdev->workqueue);
1683
1684 hci_dev_lock(hdev);
1685 hci_inquiry_cache_flush(hdev);
1686 hci_conn_hash_flush(hdev);
1687 hci_dev_unlock(hdev);
1688
1689 if (hdev->flush)
1690 hdev->flush(hdev);
1691
1692 atomic_set(&hdev->cmd_cnt, 1);
1693 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1694
1695 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1696
1697 hci_req_sync_unlock(hdev);
1698 return ret;
1699 }
1700
1701 int hci_dev_reset(__u16 dev)
1702 {
1703 struct hci_dev *hdev;
1704 int err;
1705
1706 hdev = hci_dev_get(dev);
1707 if (!hdev)
1708 return -ENODEV;
1709
1710 if (!test_bit(HCI_UP, &hdev->flags)) {
1711 err = -ENETDOWN;
1712 goto done;
1713 }
1714
1715 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1716 err = -EBUSY;
1717 goto done;
1718 }
1719
1720 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1721 err = -EOPNOTSUPP;
1722 goto done;
1723 }
1724
1725 err = hci_dev_do_reset(hdev);
1726
1727 done:
1728 hci_dev_put(hdev);
1729 return err;
1730 }
1731
1732 int hci_dev_reset_stat(__u16 dev)
1733 {
1734 struct hci_dev *hdev;
1735 int ret = 0;
1736
1737 hdev = hci_dev_get(dev);
1738 if (!hdev)
1739 return -ENODEV;
1740
1741 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1742 ret = -EBUSY;
1743 goto done;
1744 }
1745
1746 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1747 ret = -EOPNOTSUPP;
1748 goto done;
1749 }
1750
1751 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1752
1753 done:
1754 hci_dev_put(hdev);
1755 return ret;
1756 }
1757
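/* Mirror a raw HCISETSCAN ioctl into the mgmt-level settings: the Page
 * Scan bit maps to HCI_CONNECTABLE and the Inquiry Scan bit to
 * HCI_DISCOVERABLE, so that mgmt clients see consistent state.
 */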
1758 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1759 {
1760 bool conn_changed, discov_changed;
1761
1762 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1763
1764 if ((scan & SCAN_PAGE))
1765 conn_changed = !hci_dev_test_and_set_flag(hdev,
1766 HCI_CONNECTABLE);
1767 else
1768 conn_changed = hci_dev_test_and_clear_flag(hdev,
1769 HCI_CONNECTABLE);
1770
1771 if ((scan & SCAN_INQUIRY)) {
1772 discov_changed = !hci_dev_test_and_set_flag(hdev,
1773 HCI_DISCOVERABLE);
1774 } else {
1775 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1776 discov_changed = hci_dev_test_and_clear_flag(hdev,
1777 HCI_DISCOVERABLE);
1778 }
1779
1780 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1781 return;
1782
1783 if (conn_changed || discov_changed) {
1784 /* In case this was disabled through mgmt */
1785 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1786
1787 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1788 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1789
1790 mgmt_new_settings(hdev);
1791 }
1792 }
1793
1794 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1795 {
1796 struct hci_dev *hdev;
1797 struct hci_dev_req dr;
1798 int err = 0;
1799
1800 if (copy_from_user(&dr, arg, sizeof(dr)))
1801 return -EFAULT;
1802
1803 hdev = hci_dev_get(dr.dev_id);
1804 if (!hdev)
1805 return -ENODEV;
1806
1807 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1808 err = -EBUSY;
1809 goto done;
1810 }
1811
1812 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1813 err = -EOPNOTSUPP;
1814 goto done;
1815 }
1816
1817 if (hdev->dev_type != HCI_PRIMARY) {
1818 err = -EOPNOTSUPP;
1819 goto done;
1820 }
1821
1822 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1823 err = -EOPNOTSUPP;
1824 goto done;
1825 }
1826
1827 switch (cmd) {
1828 case HCISETAUTH:
1829 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1830 HCI_INIT_TIMEOUT, NULL);
1831 break;
1832
1833 case HCISETENCRYPT:
1834 if (!lmp_encrypt_capable(hdev)) {
1835 err = -EOPNOTSUPP;
1836 break;
1837 }
1838
1839 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1840 /* Auth must be enabled first */
1841 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1842 HCI_INIT_TIMEOUT, NULL);
1843 if (err)
1844 break;
1845 }
1846
1847 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1848 HCI_INIT_TIMEOUT, NULL);
1849 break;
1850
1851 case HCISETSCAN:
1852 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1853 HCI_INIT_TIMEOUT, NULL);
1854
1855 /* Ensure that the connectable and discoverable states
1856 * get correctly modified as this was a non-mgmt change.
1857 */
1858 if (!err)
1859 hci_update_scan_state(hdev, dr.dev_opt);
1860 break;
1861
1862 case HCISETLINKPOL:
1863 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1864 HCI_INIT_TIMEOUT, NULL);
1865 break;
1866
1867 case HCISETLINKMODE:
1868 hdev->link_mode = ((__u16) dr.dev_opt) &
1869 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1870 break;
1871
1872 case HCISETPTYPE:
1873 hdev->pkt_type = (__u16) dr.dev_opt;
1874 break;
1875
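	/* For the MTU ioctls, dev_opt packs two 16-bit words: the first
	 * holds the packet count and the second the MTU value.
	 */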
1876 case HCISETACLMTU:
1877 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1878 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1879 break;
1880
1881 case HCISETSCOMTU:
1882 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1883 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1884 break;
1885
1886 default:
1887 err = -EINVAL;
1888 break;
1889 }
1890
1891 done:
1892 hci_dev_put(hdev);
1893 return err;
1894 }
1895
1896 int hci_get_dev_list(void __user *arg)
1897 {
1898 struct hci_dev *hdev;
1899 struct hci_dev_list_req *dl;
1900 struct hci_dev_req *dr;
1901 int n = 0, size, err;
1902 __u16 dev_num;
1903
1904 if (get_user(dev_num, (__u16 __user *) arg))
1905 return -EFAULT;
1906
1907 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1908 return -EINVAL;
1909
1910 size = sizeof(*dl) + dev_num * sizeof(*dr);
1911
1912 dl = kzalloc(size, GFP_KERNEL);
1913 if (!dl)
1914 return -ENOMEM;
1915
1916 dr = dl->dev_req;
1917
1918 read_lock(&hci_dev_list_lock);
1919 list_for_each_entry(hdev, &hci_dev_list, list) {
1920 unsigned long flags = hdev->flags;
1921
1922 /* When the auto-off is configured it means the transport
1923 * is running, but in that case still indicate that the
1924 * device is actually down.
1925 */
1926 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1927 flags &= ~BIT(HCI_UP);
1928
1929 (dr + n)->dev_id = hdev->id;
1930 (dr + n)->dev_opt = flags;
1931
1932 if (++n >= dev_num)
1933 break;
1934 }
1935 read_unlock(&hci_dev_list_lock);
1936
1937 dl->dev_num = n;
1938 size = sizeof(*dl) + n * sizeof(*dr);
1939
1940 err = copy_to_user(arg, dl, size);
1941 kfree(dl);
1942
1943 return err ? -EFAULT : 0;
1944 }
1945
1946 int hci_get_dev_info(void __user *arg)
1947 {
1948 struct hci_dev *hdev;
1949 struct hci_dev_info di;
1950 unsigned long flags;
1951 int err = 0;
1952
1953 if (copy_from_user(&di, arg, sizeof(di)))
1954 return -EFAULT;
1955
1956 hdev = hci_dev_get(di.dev_id);
1957 if (!hdev)
1958 return -ENODEV;
1959
1960 /* When the auto-off is configured it means the transport
1961 * is running, but in that case still indicate that the
1962 * device is actually down.
1963 */
1964 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1965 flags = hdev->flags & ~BIT(HCI_UP);
1966 else
1967 flags = hdev->flags;
1968
1969 strcpy(di.name, hdev->name);
1970 di.bdaddr = hdev->bdaddr;
1971 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1972 di.flags = flags;
1973 di.pkt_type = hdev->pkt_type;
1974 if (lmp_bredr_capable(hdev)) {
1975 di.acl_mtu = hdev->acl_mtu;
1976 di.acl_pkts = hdev->acl_pkts;
1977 di.sco_mtu = hdev->sco_mtu;
1978 di.sco_pkts = hdev->sco_pkts;
1979 } else {
1980 di.acl_mtu = hdev->le_mtu;
1981 di.acl_pkts = hdev->le_pkts;
1982 di.sco_mtu = 0;
1983 di.sco_pkts = 0;
1984 }
1985 di.link_policy = hdev->link_policy;
1986 di.link_mode = hdev->link_mode;
1987
1988 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1989 memcpy(&di.features, &hdev->features, sizeof(di.features));
1990
1991 if (copy_to_user(arg, &di, sizeof(di)))
1992 err = -EFAULT;
1993
1994 hci_dev_put(hdev);
1995
1996 return err;
1997 }
1998
1999 /* ---- Interface to HCI drivers ---- */
2000
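/* rfkill callback: when the switch blocks the radio, flag the controller
 * as rfkilled and power it down unless setup or config is still in
 * progress. Unblocking only clears the flag; powering the device back up
 * is left to the normal power-on paths.
 */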
2001 static int hci_rfkill_set_block(void *data, bool blocked)
2002 {
2003 struct hci_dev *hdev = data;
2004
2005 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2006
2007 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2008 return -EBUSY;
2009
2010 if (blocked) {
2011 hci_dev_set_flag(hdev, HCI_RFKILLED);
2012 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2013 !hci_dev_test_flag(hdev, HCI_CONFIG))
2014 hci_dev_do_close(hdev);
2015 } else {
2016 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2017 }
2018
2019 return 0;
2020 }
2021
2022 static const struct rfkill_ops hci_rfkill_ops = {
2023 .set_block = hci_rfkill_set_block,
2024 };
2025
2026 static void hci_power_on(struct work_struct *work)
2027 {
2028 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2029 int err;
2030
2031 BT_DBG("%s", hdev->name);
2032
2033 if (test_bit(HCI_UP, &hdev->flags) &&
2034 hci_dev_test_flag(hdev, HCI_MGMT) &&
2035 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2036 cancel_delayed_work(&hdev->power_off);
2037 hci_req_sync_lock(hdev);
2038 err = __hci_req_hci_power_on(hdev);
2039 hci_req_sync_unlock(hdev);
2040 mgmt_power_on(hdev, err);
2041 return;
2042 }
2043
2044 err = hci_dev_do_open(hdev);
2045 if (err < 0) {
2046 hci_dev_lock(hdev);
2047 mgmt_set_powered_failed(hdev, err);
2048 hci_dev_unlock(hdev);
2049 return;
2050 }
2051
2052 /* During the HCI setup phase, a few error conditions are
2053 * ignored and they need to be checked now. If they are still
2054 * valid, it is important to turn the device back off.
2055 */
2056 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2057 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2058 (hdev->dev_type == HCI_PRIMARY &&
2059 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2060 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2061 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2062 hci_dev_do_close(hdev);
2063 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2064 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2065 HCI_AUTO_OFF_TIMEOUT);
2066 }
2067
2068 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2069 /* For unconfigured devices, set the HCI_RAW flag
2070 * so that userspace can easily identify them.
2071 */
2072 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2073 set_bit(HCI_RAW, &hdev->flags);
2074
2075 /* For fully configured devices, this will send
2076 * the Index Added event. For unconfigured devices,
2077 * it will send the Unconfigured Index Added event.
2078 *
2079 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2080 * and no event will be sent.
2081 */
2082 mgmt_index_added(hdev);
2083 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2084 /* Now that the controller is configured, it is
2085 * important to clear the HCI_RAW flag.
2086 */
2087 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2088 clear_bit(HCI_RAW, &hdev->flags);
2089
2090 /* Powering on the controller with HCI_CONFIG set only
2091 * happens with the transition from unconfigured to
2092 * configured. This will send the Index Added event.
2093 */
2094 mgmt_index_added(hdev);
2095 }
2096 }
2097
2098 static void hci_power_off(struct work_struct *work)
2099 {
2100 struct hci_dev *hdev = container_of(work, struct hci_dev,
2101 power_off.work);
2102
2103 BT_DBG("%s", hdev->name);
2104
2105 hci_dev_do_close(hdev);
2106 }
2107
2108 static void hci_error_reset(struct work_struct *work)
2109 {
2110 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 if (hdev->hw_error)
2115 hdev->hw_error(hdev, hdev->hw_error_code);
2116 else
2117 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2118 hdev->hw_error_code);
2119
2120 if (hci_dev_do_close(hdev))
2121 return;
2122
2123 hci_dev_do_open(hdev);
2124 }
2125
2126 void hci_uuids_clear(struct hci_dev *hdev)
2127 {
2128 struct bt_uuid *uuid, *tmp;
2129
2130 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2131 list_del(&uuid->list);
2132 kfree(uuid);
2133 }
2134 }
2135
2136 void hci_link_keys_clear(struct hci_dev *hdev)
2137 {
2138 struct link_key *key;
2139
2140 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2141 list_del_rcu(&key->list);
2142 kfree_rcu(key, rcu);
2143 }
2144 }
2145
2146 void hci_smp_ltks_clear(struct hci_dev *hdev)
2147 {
2148 struct smp_ltk *k;
2149
2150 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2151 list_del_rcu(&k->list);
2152 kfree_rcu(k, rcu);
2153 }
2154 }
2155
2156 void hci_smp_irks_clear(struct hci_dev *hdev)
2157 {
2158 struct smp_irk *k;
2159
2160 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2161 list_del_rcu(&k->list);
2162 kfree_rcu(k, rcu);
2163 }
2164 }
2165
2166 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2167 {
2168 struct link_key *k;
2169
2170 rcu_read_lock();
2171 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2172 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2173 rcu_read_unlock();
2174 return k;
2175 }
2176 }
2177 rcu_read_unlock();
2178
2179 return NULL;
2180 }
2181
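/* Decide whether a BR/EDR link key should be stored persistently, based
 * on the key type and on the authentication requirements both sides used
 * during pairing.
 */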
2182 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2183 u8 key_type, u8 old_key_type)
2184 {
2185 /* Legacy key */
2186 if (key_type < 0x03)
2187 return true;
2188
2189 /* Debug keys are insecure so don't store them persistently */
2190 if (key_type == HCI_LK_DEBUG_COMBINATION)
2191 return false;
2192
2193 /* Changed combination key and there's no previous one */
2194 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2195 return false;
2196
2197 /* Security mode 3 case */
2198 if (!conn)
2199 return true;
2200
2201 /* BR/EDR key derived using SC from an LE link */
2202 if (conn->type == LE_LINK)
2203 return true;
2204
2205 /* Neither local nor remote side had no-bonding as requirement */
2206 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2207 return true;
2208
2209 /* Local side had dedicated bonding as requirement */
2210 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2211 return true;
2212
2213 /* Remote side had dedicated bonding as requirement */
2214 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2215 return true;
2216
2217 /* If none of the above criteria match, then don't store the key
2218 * persistently */
2219 return false;
2220 }
2221
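/* Map an SMP long term key type to the HCI connection role for which the
 * key is valid.
 */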
2222 static u8 ltk_role(u8 type)
2223 {
2224 if (type == SMP_LTK)
2225 return HCI_ROLE_MASTER;
2226
2227 return HCI_ROLE_SLAVE;
2228 }
2229
2230 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2231 u8 addr_type, u8 role)
2232 {
2233 struct smp_ltk *k;
2234
2235 rcu_read_lock();
2236 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2237 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2238 continue;
2239
2240 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2241 rcu_read_unlock();
2242 return k;
2243 }
2244 }
2245 rcu_read_unlock();
2246
2247 return NULL;
2248 }
2249
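/* Resolve a Resolvable Private Address against the stored IRKs. A cached
 * RPA match is tried first; otherwise the RPA is verified against each
 * IRK and, on success, cached for later lookups.
 */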
2250 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2251 {
2252 struct smp_irk *irk;
2253
2254 rcu_read_lock();
2255 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2256 if (!bacmp(&irk->rpa, rpa)) {
2257 rcu_read_unlock();
2258 return irk;
2259 }
2260 }
2261
2262 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2263 if (smp_irk_matches(hdev, irk->val, rpa)) {
2264 bacpy(&irk->rpa, rpa);
2265 rcu_read_unlock();
2266 return irk;
2267 }
2268 }
2269 rcu_read_unlock();
2270
2271 return NULL;
2272 }
2273
2274 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2275 u8 addr_type)
2276 {
2277 struct smp_irk *irk;
2278
2279 /* Identity Address must be public or static random */
2280 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2281 return NULL;
2282
2283 rcu_read_lock();
2284 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2285 if (addr_type == irk->addr_type &&
2286 bacmp(bdaddr, &irk->bdaddr) == 0) {
2287 rcu_read_unlock();
2288 return irk;
2289 }
2290 }
2291 rcu_read_unlock();
2292
2293 return NULL;
2294 }
2295
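/* Store a new BR/EDR link key for bdaddr, or update the existing entry.
 * Through *persistent the caller is told whether the key should be kept
 * permanently (see hci_persistent_key above).
 */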
2296 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2297 bdaddr_t *bdaddr, u8 *val, u8 type,
2298 u8 pin_len, bool *persistent)
2299 {
2300 struct link_key *key, *old_key;
2301 u8 old_key_type;
2302
2303 old_key = hci_find_link_key(hdev, bdaddr);
2304 if (old_key) {
2305 old_key_type = old_key->type;
2306 key = old_key;
2307 } else {
2308 old_key_type = conn ? conn->key_type : 0xff;
2309 key = kzalloc(sizeof(*key), GFP_KERNEL);
2310 if (!key)
2311 return NULL;
2312 list_add_rcu(&key->list, &hdev->link_keys);
2313 }
2314
2315 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2316
2317 /* Some buggy controller combinations generate a changed
2318 * combination key for legacy pairing even when there's no
2319 * previous key */
2320 if (type == HCI_LK_CHANGED_COMBINATION &&
2321 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2322 type = HCI_LK_COMBINATION;
2323 if (conn)
2324 conn->key_type = type;
2325 }
2326
2327 bacpy(&key->bdaddr, bdaddr);
2328 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2329 key->pin_len = pin_len;
2330
2331 if (type == HCI_LK_CHANGED_COMBINATION)
2332 key->type = old_key_type;
2333 else
2334 key->type = type;
2335
2336 if (persistent)
2337 *persistent = hci_persistent_key(hdev, conn, type,
2338 old_key_type);
2339
2340 return key;
2341 }
2342
2343 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2344 u8 addr_type, u8 type, u8 authenticated,
2345 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2346 {
2347 struct smp_ltk *key, *old_key;
2348 u8 role = ltk_role(type);
2349
2350 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2351 if (old_key)
2352 key = old_key;
2353 else {
2354 key = kzalloc(sizeof(*key), GFP_KERNEL);
2355 if (!key)
2356 return NULL;
2357 list_add_rcu(&key->list, &hdev->long_term_keys);
2358 }
2359
2360 bacpy(&key->bdaddr, bdaddr);
2361 key->bdaddr_type = addr_type;
2362 memcpy(key->val, tk, sizeof(key->val));
2363 key->authenticated = authenticated;
2364 key->ediv = ediv;
2365 key->rand = rand;
2366 key->enc_size = enc_size;
2367 key->type = type;
2368
2369 return key;
2370 }
2371
2372 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2373 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2374 {
2375 struct smp_irk *irk;
2376
2377 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2378 if (!irk) {
2379 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2380 if (!irk)
2381 return NULL;
2382
2383 bacpy(&irk->bdaddr, bdaddr);
2384 irk->addr_type = addr_type;
2385
2386 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2387 }
2388
2389 memcpy(irk->val, val, 16);
2390 bacpy(&irk->rpa, rpa);
2391
2392 return irk;
2393 }
2394
2395 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2396 {
2397 struct link_key *key;
2398
2399 key = hci_find_link_key(hdev, bdaddr);
2400 if (!key)
2401 return -ENOENT;
2402
2403 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2404
2405 list_del_rcu(&key->list);
2406 kfree_rcu(key, rcu);
2407
2408 return 0;
2409 }
2410
2411 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2412 {
2413 struct smp_ltk *k;
2414 int removed = 0;
2415
2416 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2417 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2418 continue;
2419
2420 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2421
2422 list_del_rcu(&k->list);
2423 kfree_rcu(k, rcu);
2424 removed++;
2425 }
2426
2427 return removed ? 0 : -ENOENT;
2428 }
2429
2430 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2431 {
2432 struct smp_irk *k;
2433
2434 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2435 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2436 continue;
2437
2438 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2439
2440 list_del_rcu(&k->list);
2441 kfree_rcu(k, rcu);
2442 }
2443 }
2444
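/* Check whether a bond exists for the given address: a stored BR/EDR
 * link key, or for LE a long term key matching the identity address
 * after IRK resolution.
 */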
2445 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2446 {
2447 struct smp_ltk *k;
2448 struct smp_irk *irk;
2449 u8 addr_type;
2450
2451 if (type == BDADDR_BREDR) {
2452 if (hci_find_link_key(hdev, bdaddr))
2453 return true;
2454 return false;
2455 }
2456
2457 /* Convert to HCI addr type which struct smp_ltk uses */
2458 if (type == BDADDR_LE_PUBLIC)
2459 addr_type = ADDR_LE_DEV_PUBLIC;
2460 else
2461 addr_type = ADDR_LE_DEV_RANDOM;
2462
2463 irk = hci_get_irk(hdev, bdaddr, addr_type);
2464 if (irk) {
2465 bdaddr = &irk->bdaddr;
2466 addr_type = irk->addr_type;
2467 }
2468
2469 rcu_read_lock();
2470 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2471 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2472 rcu_read_unlock();
2473 return true;
2474 }
2475 }
2476 rcu_read_unlock();
2477
2478 return false;
2479 }
2480
2481 /* HCI command timer function */
2482 static void hci_cmd_timeout(struct work_struct *work)
2483 {
2484 struct hci_dev *hdev = container_of(work, struct hci_dev,
2485 cmd_timer.work);
2486
2487 if (hdev->sent_cmd) {
2488 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2489 u16 opcode = __le16_to_cpu(sent->opcode);
2490
2491 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2492 } else {
2493 BT_ERR("%s command tx timeout", hdev->name);
2494 }
2495
2496 atomic_set(&hdev->cmd_cnt, 1);
2497 queue_work(hdev->workqueue, &hdev->cmd_work);
2498 }
2499
2500 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2501 bdaddr_t *bdaddr, u8 bdaddr_type)
2502 {
2503 struct oob_data *data;
2504
2505 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2506 if (bacmp(bdaddr, &data->bdaddr) != 0)
2507 continue;
2508 if (data->bdaddr_type != bdaddr_type)
2509 continue;
2510 return data;
2511 }
2512
2513 return NULL;
2514 }
2515
2516 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2517 u8 bdaddr_type)
2518 {
2519 struct oob_data *data;
2520
2521 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2522 if (!data)
2523 return -ENOENT;
2524
2525 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2526
2527 list_del(&data->list);
2528 kfree(data);
2529
2530 return 0;
2531 }
2532
2533 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2534 {
2535 struct oob_data *data, *n;
2536
2537 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2538 list_del(&data->list);
2539 kfree(data);
2540 }
2541 }
2542
2543 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2544 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2545 u8 *hash256, u8 *rand256)
2546 {
2547 struct oob_data *data;
2548
2549 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2550 if (!data) {
2551 data = kmalloc(sizeof(*data), GFP_KERNEL);
2552 if (!data)
2553 return -ENOMEM;
2554
2555 bacpy(&data->bdaddr, bdaddr);
2556 data->bdaddr_type = bdaddr_type;
2557 list_add(&data->list, &hdev->remote_oob_data);
2558 }
2559
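/* data->present tracks which OOB data sets were provided:
 * 0x00 = none, 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 */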
2560 if (hash192 && rand192) {
2561 memcpy(data->hash192, hash192, sizeof(data->hash192));
2562 memcpy(data->rand192, rand192, sizeof(data->rand192));
2563 if (hash256 && rand256)
2564 data->present = 0x03;
2565 } else {
2566 memset(data->hash192, 0, sizeof(data->hash192));
2567 memset(data->rand192, 0, sizeof(data->rand192));
2568 if (hash256 && rand256)
2569 data->present = 0x02;
2570 else
2571 data->present = 0x00;
2572 }
2573
2574 if (hash256 && rand256) {
2575 memcpy(data->hash256, hash256, sizeof(data->hash256));
2576 memcpy(data->rand256, rand256, sizeof(data->rand256));
2577 } else {
2578 memset(data->hash256, 0, sizeof(data->hash256));
2579 memset(data->rand256, 0, sizeof(data->rand256));
2580 if (hash192 && rand192)
2581 data->present = 0x01;
2582 }
2583
2584 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2585
2586 return 0;
2587 }
2588
2589 /* This function requires the caller holds hdev->lock */
2590 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2591 {
2592 struct adv_info *adv_instance;
2593
2594 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2595 if (adv_instance->instance == instance)
2596 return adv_instance;
2597 }
2598
2599 return NULL;
2600 }
2601
2602 /* This function requires the caller holds hdev->lock */
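/* Return the advertising instance that follows the given one, wrapping
 * around to the first entry after the last one.
 */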
2603 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2604 {
2605 struct adv_info *cur_instance;
2606
2607 cur_instance = hci_find_adv_instance(hdev, instance);
2608 if (!cur_instance)
2609 return NULL;
2610
2611 if (cur_instance == list_last_entry(&hdev->adv_instances,
2612 struct adv_info, list))
2613 return list_first_entry(&hdev->adv_instances,
2614 struct adv_info, list);
2615 else
2616 return list_next_entry(cur_instance, list);
2617 }
2618
2619 /* This function requires the caller holds hdev->lock */
2620 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2621 {
2622 struct adv_info *adv_instance;
2623
2624 adv_instance = hci_find_adv_instance(hdev, instance);
2625 if (!adv_instance)
2626 return -ENOENT;
2627
2628 BT_DBG("%s removing instance %d", hdev->name, instance);
2629
2630 if (hdev->cur_adv_instance == instance) {
2631 if (hdev->adv_instance_timeout) {
2632 cancel_delayed_work(&hdev->adv_instance_expire);
2633 hdev->adv_instance_timeout = 0;
2634 }
2635 hdev->cur_adv_instance = 0x00;
2636 }
2637
2638 list_del(&adv_instance->list);
2639 kfree(adv_instance);
2640
2641 hdev->adv_instance_cnt--;
2642
2643 return 0;
2644 }
2645
2646 /* This function requires the caller holds hdev->lock */
2647 void hci_adv_instances_clear(struct hci_dev *hdev)
2648 {
2649 struct adv_info *adv_instance, *n;
2650
2651 if (hdev->adv_instance_timeout) {
2652 cancel_delayed_work(&hdev->adv_instance_expire);
2653 hdev->adv_instance_timeout = 0;
2654 }
2655
2656 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2657 list_del(&adv_instance->list);
2658 kfree(adv_instance);
2659 }
2660
2661 hdev->adv_instance_cnt = 0;
2662 hdev->cur_adv_instance = 0x00;
2663 }
2664
2665 /* This function requires the caller holds hdev->lock */
2666 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2667 u16 adv_data_len, u8 *adv_data,
2668 u16 scan_rsp_len, u8 *scan_rsp_data,
2669 u16 timeout, u16 duration)
2670 {
2671 struct adv_info *adv_instance;
2672
2673 adv_instance = hci_find_adv_instance(hdev, instance);
2674 if (adv_instance) {
2675 memset(adv_instance->adv_data, 0,
2676 sizeof(adv_instance->adv_data));
2677 memset(adv_instance->scan_rsp_data, 0,
2678 sizeof(adv_instance->scan_rsp_data));
2679 } else {
2680 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2681 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2682 return -EOVERFLOW;
2683
2684 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2685 if (!adv_instance)
2686 return -ENOMEM;
2687
2688 adv_instance->pending = true;
2689 adv_instance->instance = instance;
2690 list_add(&adv_instance->list, &hdev->adv_instances);
2691 hdev->adv_instance_cnt++;
2692 }
2693
2694 adv_instance->flags = flags;
2695 adv_instance->adv_data_len = adv_data_len;
2696 adv_instance->scan_rsp_len = scan_rsp_len;
2697
2698 if (adv_data_len)
2699 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2700
2701 if (scan_rsp_len)
2702 memcpy(adv_instance->scan_rsp_data,
2703 scan_rsp_data, scan_rsp_len);
2704
2705 adv_instance->timeout = timeout;
2706 adv_instance->remaining_time = timeout;
2707
2708 if (duration == 0)
2709 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2710 else
2711 adv_instance->duration = duration;
2712
2713 BT_DBG("%s for instance %d", hdev->name, instance);
2714
2715 return 0;
2716 }
2717
2718 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2719 bdaddr_t *bdaddr, u8 type)
2720 {
2721 struct bdaddr_list *b;
2722
2723 list_for_each_entry(b, bdaddr_list, list) {
2724 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2725 return b;
2726 }
2727
2728 return NULL;
2729 }
2730
2731 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2732 {
2733 struct bdaddr_list *b, *n;
2734
2735 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2736 list_del(&b->list);
2737 kfree(b);
2738 }
2739 }
2740
2741 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2742 {
2743 struct bdaddr_list *entry;
2744
2745 if (!bacmp(bdaddr, BDADDR_ANY))
2746 return -EBADF;
2747
2748 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2749 return -EEXIST;
2750
2751 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2752 if (!entry)
2753 return -ENOMEM;
2754
2755 bacpy(&entry->bdaddr, bdaddr);
2756 entry->bdaddr_type = type;
2757
2758 list_add(&entry->list, list);
2759
2760 return 0;
2761 }
2762
2763 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2764 {
2765 struct bdaddr_list *entry;
2766
2767 if (!bacmp(bdaddr, BDADDR_ANY)) {
2768 hci_bdaddr_list_clear(list);
2769 return 0;
2770 }
2771
2772 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2773 if (!entry)
2774 return -ENOENT;
2775
2776 list_del(&entry->list);
2777 kfree(entry);
2778
2779 return 0;
2780 }
2781
2782 /* This function requires the caller holds hdev->lock */
2783 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2784 bdaddr_t *addr, u8 addr_type)
2785 {
2786 struct hci_conn_params *params;
2787
2788 list_for_each_entry(params, &hdev->le_conn_params, list) {
2789 if (bacmp(&params->addr, addr) == 0 &&
2790 params->addr_type == addr_type) {
2791 return params;
2792 }
2793 }
2794
2795 return NULL;
2796 }
2797
2798 /* This function requires the caller holds hdev->lock */
2799 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2800 bdaddr_t *addr, u8 addr_type)
2801 {
2802 struct hci_conn_params *param;
2803
2804 list_for_each_entry(param, list, action) {
2805 if (bacmp(&param->addr, addr) == 0 &&
2806 param->addr_type == addr_type)
2807 return param;
2808 }
2809
2810 return NULL;
2811 }
2812
2813 /* This function requires the caller holds hdev->lock */
2814 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2815 bdaddr_t *addr, u8 addr_type)
2816 {
2817 struct hci_conn_params *params;
2818
2819 params = hci_conn_params_lookup(hdev, addr, addr_type);
2820 if (params)
2821 return params;
2822
2823 params = kzalloc(sizeof(*params), GFP_KERNEL);
2824 if (!params) {
2825 BT_ERR("Out of memory");
2826 return NULL;
2827 }
2828
2829 bacpy(&params->addr, addr);
2830 params->addr_type = addr_type;
2831
2832 list_add(&params->list, &hdev->le_conn_params);
2833 INIT_LIST_HEAD(&params->action);
2834
2835 params->conn_min_interval = hdev->le_conn_min_interval;
2836 params->conn_max_interval = hdev->le_conn_max_interval;
2837 params->conn_latency = hdev->le_conn_latency;
2838 params->supervision_timeout = hdev->le_supv_timeout;
2839 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2840
2841 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2842
2843 return params;
2844 }
2845
2846 static void hci_conn_params_free(struct hci_conn_params *params)
2847 {
2848 if (params->conn) {
2849 hci_conn_drop(params->conn);
2850 hci_conn_put(params->conn);
2851 }
2852
2853 list_del(&params->action);
2854 list_del(&params->list);
2855 kfree(params);
2856 }
2857
2858 /* This function requires the caller holds hdev->lock */
2859 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2860 {
2861 struct hci_conn_params *params;
2862
2863 params = hci_conn_params_lookup(hdev, addr, addr_type);
2864 if (!params)
2865 return;
2866
2867 hci_conn_params_free(params);
2868
2869 hci_update_background_scan(hdev);
2870
2871 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2872 }
2873
2874 /* This function requires the caller holds hdev->lock */
2875 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2876 {
2877 struct hci_conn_params *params, *tmp;
2878
2879 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2880 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2881 continue;
2882
2883 /* If trying to establish a one-time connection to a disabled
2884 * device, leave the params but mark them for explicit connect only.
2885 */
2886 if (params->explicit_connect) {
2887 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2888 continue;
2889 }
2890
2891 list_del(&params->list);
2892 kfree(params);
2893 }
2894
2895 BT_DBG("All LE disabled connection parameters were removed");
2896 }
2897
2898 /* This function requires the caller holds hdev->lock */
2899 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2900 {
2901 struct hci_conn_params *params, *tmp;
2902
2903 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2904 hci_conn_params_free(params);
2905
2906 BT_DBG("All LE connection parameters were removed");
2907 }
2908
2909 /* Copy the Identity Address of the controller.
2910 *
2911 * If the controller has a public BD_ADDR, then by default use that one.
2912 * If this is a LE only controller without a public address, default to
2913 * the static random address.
2914 *
2915 * For debugging purposes it is possible to force controllers with a
2916 * public address to use the static random address instead.
2917 *
2918 * In case BR/EDR has been disabled on a dual-mode controller and
2919 * userspace has configured a static address, then that address
2920 * becomes the identity address instead of the public BR/EDR address.
2921 */
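/* Example (illustrative only): a caller that needs the own address, for
 * instance when building advertising parameters, might do:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */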
2922 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2923 u8 *bdaddr_type)
2924 {
2925 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2926 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2927 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2928 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2929 bacpy(bdaddr, &hdev->static_addr);
2930 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2931 } else {
2932 bacpy(bdaddr, &hdev->bdaddr);
2933 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2934 }
2935 }
2936
2937 /* Alloc HCI device */
2938 struct hci_dev *hci_alloc_dev(void)
2939 {
2940 struct hci_dev *hdev;
2941
2942 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2943 if (!hdev)
2944 return NULL;
2945
2946 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2947 hdev->esco_type = (ESCO_HV1);
2948 hdev->link_mode = (HCI_LM_ACCEPT);
2949 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2950 hdev->io_capability = 0x03; /* No Input No Output */
2951 hdev->manufacturer = 0xffff; /* Default to internal use */
2952 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2953 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2954 hdev->adv_instance_cnt = 0;
2955 hdev->cur_adv_instance = 0x00;
2956 hdev->adv_instance_timeout = 0;
2957
2958 hdev->sniff_max_interval = 800;
2959 hdev->sniff_min_interval = 80;
2960
2961 hdev->le_adv_channel_map = 0x07;
2962 hdev->le_adv_min_interval = 0x0800;
2963 hdev->le_adv_max_interval = 0x0800;
2964 hdev->le_scan_interval = 0x0060;
2965 hdev->le_scan_window = 0x0030;
2966 hdev->le_conn_min_interval = 0x0028;
2967 hdev->le_conn_max_interval = 0x0038;
2968 hdev->le_conn_latency = 0x0000;
2969 hdev->le_supv_timeout = 0x002a;
2970 hdev->le_def_tx_len = 0x001b;
2971 hdev->le_def_tx_time = 0x0148;
2972 hdev->le_max_tx_len = 0x001b;
2973 hdev->le_max_tx_time = 0x0148;
2974 hdev->le_max_rx_len = 0x001b;
2975 hdev->le_max_rx_time = 0x0148;
2976
2977 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2978 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2979 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2980 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2981
2982 mutex_init(&hdev->lock);
2983 mutex_init(&hdev->req_lock);
2984
2985 INIT_LIST_HEAD(&hdev->mgmt_pending);
2986 INIT_LIST_HEAD(&hdev->blacklist);
2987 INIT_LIST_HEAD(&hdev->whitelist);
2988 INIT_LIST_HEAD(&hdev->uuids);
2989 INIT_LIST_HEAD(&hdev->link_keys);
2990 INIT_LIST_HEAD(&hdev->long_term_keys);
2991 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2992 INIT_LIST_HEAD(&hdev->remote_oob_data);
2993 INIT_LIST_HEAD(&hdev->le_white_list);
2994 INIT_LIST_HEAD(&hdev->le_conn_params);
2995 INIT_LIST_HEAD(&hdev->pend_le_conns);
2996 INIT_LIST_HEAD(&hdev->pend_le_reports);
2997 INIT_LIST_HEAD(&hdev->conn_hash.list);
2998 INIT_LIST_HEAD(&hdev->adv_instances);
2999
3000 INIT_WORK(&hdev->rx_work, hci_rx_work);
3001 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3002 INIT_WORK(&hdev->tx_work, hci_tx_work);
3003 INIT_WORK(&hdev->power_on, hci_power_on);
3004 INIT_WORK(&hdev->error_reset, hci_error_reset);
3005
3006 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3007
3008 skb_queue_head_init(&hdev->rx_q);
3009 skb_queue_head_init(&hdev->cmd_q);
3010 skb_queue_head_init(&hdev->raw_q);
3011
3012 init_waitqueue_head(&hdev->req_wait_q);
3013
3014 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3015
3016 hci_request_setup(hdev);
3017
3018 hci_init_sysfs(hdev);
3019 discovery_init(hdev);
3020
3021 return hdev;
3022 }
3023 EXPORT_SYMBOL(hci_alloc_dev);
3024
3025 /* Free HCI device */
3026 void hci_free_dev(struct hci_dev *hdev)
3027 {
3028 /* will free via device release */
3029 put_device(&hdev->dev);
3030 }
3031 EXPORT_SYMBOL(hci_free_dev);
3032
3033 /* Register HCI device */
3034 int hci_register_dev(struct hci_dev *hdev)
3035 {
3036 int id, error;
3037
3038 if (!hdev->open || !hdev->close || !hdev->send)
3039 return -EINVAL;
3040
3041 /* Do not allow HCI_AMP devices to register at index 0,
3042 * so the index can be used as the AMP controller ID.
3043 */
3044 switch (hdev->dev_type) {
3045 case HCI_PRIMARY:
3046 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3047 break;
3048 case HCI_AMP:
3049 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3050 break;
3051 default:
3052 return -EINVAL;
3053 }
3054
3055 if (id < 0)
3056 return id;
3057
3058 sprintf(hdev->name, "hci%d", id);
3059 hdev->id = id;
3060
3061 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3062
3063 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3064 WQ_MEM_RECLAIM, 1, hdev->name);
3065 if (!hdev->workqueue) {
3066 error = -ENOMEM;
3067 goto err;
3068 }
3069
3070 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3071 WQ_MEM_RECLAIM, 1, hdev->name);
3072 if (!hdev->req_workqueue) {
3073 destroy_workqueue(hdev->workqueue);
3074 error = -ENOMEM;
3075 goto err;
3076 }
3077
3078 if (!IS_ERR_OR_NULL(bt_debugfs))
3079 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3080
3081 dev_set_name(&hdev->dev, "%s", hdev->name);
3082
3083 error = device_add(&hdev->dev);
3084 if (error < 0)
3085 goto err_wqueue;
3086
3087 hci_leds_init(hdev);
3088
3089 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3090 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3091 hdev);
3092 if (hdev->rfkill) {
3093 if (rfkill_register(hdev->rfkill) < 0) {
3094 rfkill_destroy(hdev->rfkill);
3095 hdev->rfkill = NULL;
3096 }
3097 }
3098
3099 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3100 hci_dev_set_flag(hdev, HCI_RFKILLED);
3101
3102 hci_dev_set_flag(hdev, HCI_SETUP);
3103 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3104
3105 if (hdev->dev_type == HCI_PRIMARY) {
3106 /* Assume BR/EDR support until proven otherwise (such as
3107 * through reading supported features during init).
3108 */
3109 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3110 }
3111
3112 write_lock(&hci_dev_list_lock);
3113 list_add(&hdev->list, &hci_dev_list);
3114 write_unlock(&hci_dev_list_lock);
3115
3116 /* Devices that are marked for raw-only usage are unconfigured
3117 * and should not be included in normal operation.
3118 */
3119 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3120 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3121
3122 hci_sock_dev_event(hdev, HCI_DEV_REG);
3123 hci_dev_hold(hdev);
3124
3125 queue_work(hdev->req_workqueue, &hdev->power_on);
3126
3127 return id;
3128
3129 err_wqueue:
3130 destroy_workqueue(hdev->workqueue);
3131 destroy_workqueue(hdev->req_workqueue);
3132 err:
3133 ida_simple_remove(&hci_index_ida, hdev->id);
3134
3135 return error;
3136 }
3137 EXPORT_SYMBOL(hci_register_dev);
3138
3139 /* Unregister HCI device */
3140 void hci_unregister_dev(struct hci_dev *hdev)
3141 {
3142 int id;
3143
3144 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3145
3146 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3147
3148 id = hdev->id;
3149
3150 write_lock(&hci_dev_list_lock);
3151 list_del(&hdev->list);
3152 write_unlock(&hci_dev_list_lock);
3153
3154 cancel_work_sync(&hdev->power_on);
3155
3156 hci_dev_do_close(hdev);
3157
3158 if (!test_bit(HCI_INIT, &hdev->flags) &&
3159 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3160 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3161 hci_dev_lock(hdev);
3162 mgmt_index_removed(hdev);
3163 hci_dev_unlock(hdev);
3164 }
3165
3166 /* mgmt_index_removed should take care of emptying the
3167 * pending list */
3168 BUG_ON(!list_empty(&hdev->mgmt_pending));
3169
3170 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3171
3172 if (hdev->rfkill) {
3173 rfkill_unregister(hdev->rfkill);
3174 rfkill_destroy(hdev->rfkill);
3175 }
3176
3177 device_del(&hdev->dev);
3178
3179 debugfs_remove_recursive(hdev->debugfs);
3180 kfree_const(hdev->hw_info);
3181 kfree_const(hdev->fw_info);
3182
3183 destroy_workqueue(hdev->workqueue);
3184 destroy_workqueue(hdev->req_workqueue);
3185
3186 hci_dev_lock(hdev);
3187 hci_bdaddr_list_clear(&hdev->blacklist);
3188 hci_bdaddr_list_clear(&hdev->whitelist);
3189 hci_uuids_clear(hdev);
3190 hci_link_keys_clear(hdev);
3191 hci_smp_ltks_clear(hdev);
3192 hci_smp_irks_clear(hdev);
3193 hci_remote_oob_data_clear(hdev);
3194 hci_adv_instances_clear(hdev);
3195 hci_bdaddr_list_clear(&hdev->le_white_list);
3196 hci_conn_params_clear_all(hdev);
3197 hci_discovery_filter_clear(hdev);
3198 hci_dev_unlock(hdev);
3199
3200 hci_dev_put(hdev);
3201
3202 ida_simple_remove(&hci_index_ida, id);
3203 }
3204 EXPORT_SYMBOL(hci_unregister_dev);
3205
3206 /* Suspend HCI device */
3207 int hci_suspend_dev(struct hci_dev *hdev)
3208 {
3209 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3210 return 0;
3211 }
3212 EXPORT_SYMBOL(hci_suspend_dev);
3213
3214 /* Resume HCI device */
3215 int hci_resume_dev(struct hci_dev *hdev)
3216 {
3217 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3218 return 0;
3219 }
3220 EXPORT_SYMBOL(hci_resume_dev);
3221
3222 /* Reset HCI device */
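/* Drivers call this to request a full stack-driven reset: a synthetic
 * HCI Hardware Error event is injected into the RX path, so the normal
 * hardware error handling (see hci_error_reset above) closes and
 * re-opens the device.
 */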
3223 int hci_reset_dev(struct hci_dev *hdev)
3224 {
3225 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3226 struct sk_buff *skb;
3227
3228 skb = bt_skb_alloc(3, GFP_ATOMIC);
3229 if (!skb)
3230 return -ENOMEM;
3231
3232 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3233 memcpy(skb_put(skb, 3), hw_err, 3);
3234
3235 /* Send Hardware Error to upper stack */
3236 return hci_recv_frame(hdev, skb);
3237 }
3238 EXPORT_SYMBOL(hci_reset_dev);
3239
3240 /* Receive frame from HCI drivers */
3241 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3242 {
3243 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3244 && !test_bit(HCI_INIT, &hdev->flags))) {
3245 kfree_skb(skb);
3246 return -ENXIO;
3247 }
3248
3249 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3250 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3251 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3252 kfree_skb(skb);
3253 return -EINVAL;
3254 }
3255
3256 /* Incoming skb */
3257 bt_cb(skb)->incoming = 1;
3258
3259 /* Time stamp */
3260 __net_timestamp(skb);
3261
3262 skb_queue_tail(&hdev->rx_q, skb);
3263 queue_work(hdev->workqueue, &hdev->rx_work);
3264
3265 return 0;
3266 }
3267 EXPORT_SYMBOL(hci_recv_frame);
3268
3269 /* Receive diagnostic message from HCI drivers */
3270 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3271 {
3272 /* Mark as diagnostic packet */
3273 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3274
3275 /* Time stamp */
3276 __net_timestamp(skb);
3277
3278 skb_queue_tail(&hdev->rx_q, skb);
3279 queue_work(hdev->workqueue, &hdev->rx_work);
3280
3281 return 0;
3282 }
3283 EXPORT_SYMBOL(hci_recv_diag);
3284
3285 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3286 {
3287 va_list vargs;
3288
3289 va_start(vargs, fmt);
3290 kfree_const(hdev->hw_info);
3291 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3292 va_end(vargs);
3293 }
3294 EXPORT_SYMBOL(hci_set_hw_info);
3295
3296 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3297 {
3298 va_list vargs;
3299
3300 va_start(vargs, fmt);
3301 kfree_const(hdev->fw_info);
3302 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3303 va_end(vargs);
3304 }
3305 EXPORT_SYMBOL(hci_set_fw_info);
3306
3307 /* ---- Interface to upper protocols ---- */
3308
3309 int hci_register_cb(struct hci_cb *cb)
3310 {
3311 BT_DBG("%p name %s", cb, cb->name);
3312
3313 mutex_lock(&hci_cb_list_lock);
3314 list_add_tail(&cb->list, &hci_cb_list);
3315 mutex_unlock(&hci_cb_list_lock);
3316
3317 return 0;
3318 }
3319 EXPORT_SYMBOL(hci_register_cb);
3320
3321 int hci_unregister_cb(struct hci_cb *cb)
3322 {
3323 BT_DBG("%p name %s", cb, cb->name);
3324
3325 mutex_lock(&hci_cb_list_lock);
3326 list_del(&cb->list);
3327 mutex_unlock(&hci_cb_list_lock);
3328
3329 return 0;
3330 }
3331 EXPORT_SYMBOL(hci_unregister_cb);
3332
3333 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3334 {
3335 int err;
3336
3337 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3338 skb->len);
3339
3340 /* Time stamp */
3341 __net_timestamp(skb);
3342
3343 /* Send copy to monitor */
3344 hci_send_to_monitor(hdev, skb);
3345
3346 if (atomic_read(&hdev->promisc)) {
3347 /* Send copy to the sockets */
3348 hci_send_to_sock(hdev, skb);
3349 }
3350
3351 /* Get rid of skb owner, prior to sending to the driver. */
3352 skb_orphan(skb);
3353
3354 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3355 kfree_skb(skb);
3356 return;
3357 }
3358
3359 err = hdev->send(hdev, skb);
3360 if (err < 0) {
3361 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3362 kfree_skb(skb);
3363 }
3364 }
3365
3366 /* Send HCI command */
3367 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3368 const void *param)
3369 {
3370 struct sk_buff *skb;
3371
3372 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3373
3374 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3375 if (!skb) {
3376 BT_ERR("%s no memory for command", hdev->name);
3377 return -ENOMEM;
3378 }
3379
3380 /* Stand-alone HCI commands must be flagged as
3381 * single-command requests.
3382 */
3383 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3384
3385 skb_queue_tail(&hdev->cmd_q, skb);
3386 queue_work(hdev->workqueue, &hdev->cmd_work);
3387
3388 return 0;
3389 }
3390
3391 /* Get data from the previously sent command */
3392 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3393 {
3394 struct hci_command_hdr *hdr;
3395
3396 if (!hdev->sent_cmd)
3397 return NULL;
3398
3399 hdr = (void *) hdev->sent_cmd->data;
3400
3401 if (hdr->opcode != cpu_to_le16(opcode))
3402 return NULL;
3403
3404 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3405
3406 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3407 }
3408
3409 /* Send HCI command and wait for command complete event */
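/* Example (illustrative only):
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */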
3410 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3411 const void *param, u32 timeout)
3412 {
3413 struct sk_buff *skb;
3414
3415 if (!test_bit(HCI_UP, &hdev->flags))
3416 return ERR_PTR(-ENETDOWN);
3417
3418 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3419
3420 hci_req_sync_lock(hdev);
3421 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3422 hci_req_sync_unlock(hdev);
3423
3424 return skb;
3425 }
3426 EXPORT_SYMBOL(hci_cmd_sync);
3427
3428 /* Send ACL data */
3429 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3430 {
3431 struct hci_acl_hdr *hdr;
3432 int len = skb->len;
3433
3434 skb_push(skb, HCI_ACL_HDR_SIZE);
3435 skb_reset_transport_header(skb);
3436 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3437 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3438 hdr->dlen = cpu_to_le16(len);
3439 }
3440
3441 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3442 struct sk_buff *skb, __u16 flags)
3443 {
3444 struct hci_conn *conn = chan->conn;
3445 struct hci_dev *hdev = conn->hdev;
3446 struct sk_buff *list;
3447
3448 skb->len = skb_headlen(skb);
3449 skb->data_len = 0;
3450
3451 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3452
3453 switch (hdev->dev_type) {
3454 case HCI_PRIMARY:
3455 hci_add_acl_hdr(skb, conn->handle, flags);
3456 break;
3457 case HCI_AMP:
3458 hci_add_acl_hdr(skb, chan->handle, flags);
3459 break;
3460 default:
3461 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3462 return;
3463 }
3464
3465 list = skb_shinfo(skb)->frag_list;
3466 if (!list) {
3467 /* Non fragmented */
3468 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3469
3470 skb_queue_tail(queue, skb);
3471 } else {
3472 /* Fragmented */
3473 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3474
3475 skb_shinfo(skb)->frag_list = NULL;
3476
3477 /* Queue all fragments atomically. We need to use spin_lock_bh
3478 * here because of 6LoWPAN links, as there this function is
3479 * called from softirq and using normal spin lock could cause
3480 * deadlocks.
3481 */
3482 spin_lock_bh(&queue->lock);
3483
3484 __skb_queue_tail(queue, skb);
3485
3486 flags &= ~ACL_START;
3487 flags |= ACL_CONT;
3488 do {
3489 skb = list; list = list->next;
3490
3491 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3492 hci_add_acl_hdr(skb, conn->handle, flags);
3493
3494 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3495
3496 __skb_queue_tail(queue, skb);
3497 } while (list);
3498
3499 spin_unlock_bh(&queue->lock);
3500 }
3501 }
3502
3503 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3504 {
3505 struct hci_dev *hdev = chan->conn->hdev;
3506
3507 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3508
3509 hci_queue_acl(chan, &chan->data_q, skb, flags);
3510
3511 queue_work(hdev->workqueue, &hdev->tx_work);
3512 }
3513
3514 /* Send SCO data */
3515 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3516 {
3517 struct hci_dev *hdev = conn->hdev;
3518 struct hci_sco_hdr hdr;
3519
3520 BT_DBG("%s len %d", hdev->name, skb->len);
3521
3522 hdr.handle = cpu_to_le16(conn->handle);
3523 hdr.dlen = skb->len;
3524
3525 skb_push(skb, HCI_SCO_HDR_SIZE);
3526 skb_reset_transport_header(skb);
3527 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3528
3529 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3530
3531 skb_queue_tail(&conn->data_q, skb);
3532 queue_work(hdev->workqueue, &hdev->tx_work);
3533 }
3534
3535 /* ---- HCI TX task (outgoing data) ---- */
3536
3537 /* HCI Connection scheduler */
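/* hci_low_sent() picks, among connections of the given link type that
 * have queued data, the one with the fewest packets in flight and
 * computes a fair quote: the available controller buffers divided by
 * the number of ready connections (at least one).
 */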
3538 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3539 int *quote)
3540 {
3541 struct hci_conn_hash *h = &hdev->conn_hash;
3542 struct hci_conn *conn = NULL, *c;
3543 unsigned int num = 0, min = ~0;
3544
3545 /* We don't have to lock device here. Connections are always
3546 * added and removed with TX task disabled. */
3547
3548 rcu_read_lock();
3549
3550 list_for_each_entry_rcu(c, &h->list, list) {
3551 if (c->type != type || skb_queue_empty(&c->data_q))
3552 continue;
3553
3554 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3555 continue;
3556
3557 num++;
3558
3559 if (c->sent < min) {
3560 min = c->sent;
3561 conn = c;
3562 }
3563
3564 if (hci_conn_num(hdev, type) == num)
3565 break;
3566 }
3567
3568 rcu_read_unlock();
3569
3570 if (conn) {
3571 int cnt, q;
3572
3573 switch (conn->type) {
3574 case ACL_LINK:
3575 cnt = hdev->acl_cnt;
3576 break;
3577 case SCO_LINK:
3578 case ESCO_LINK:
3579 cnt = hdev->sco_cnt;
3580 break;
3581 case LE_LINK:
3582 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3583 break;
3584 default:
3585 cnt = 0;
3586 BT_ERR("Unknown link type");
3587 }
3588
3589 q = cnt / num;
3590 *quote = q ? q : 1;
3591 } else
3592 *quote = 0;
3593
3594 BT_DBG("conn %p quote %d", conn, *quote);
3595 return conn;
3596 }
3597
3598 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3599 {
3600 struct hci_conn_hash *h = &hdev->conn_hash;
3601 struct hci_conn *c;
3602
3603 BT_ERR("%s link tx timeout", hdev->name);
3604
3605 rcu_read_lock();
3606
3607 /* Kill stalled connections */
3608 list_for_each_entry_rcu(c, &h->list, list) {
3609 if (c->type == type && c->sent) {
3610 BT_ERR("%s killing stalled connection %pMR",
3611 hdev->name, &c->dst);
3612 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3613 }
3614 }
3615
3616 rcu_read_unlock();
3617 }
3618
3619 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3620 int *quote)
3621 {
3622 struct hci_conn_hash *h = &hdev->conn_hash;
3623 struct hci_chan *chan = NULL;
3624 unsigned int num = 0, min = ~0, cur_prio = 0;
3625 struct hci_conn *conn;
3626 int cnt, q, conn_num = 0;
3627
3628 BT_DBG("%s", hdev->name);
3629
3630 rcu_read_lock();
3631
3632 list_for_each_entry_rcu(conn, &h->list, list) {
3633 struct hci_chan *tmp;
3634
3635 if (conn->type != type)
3636 continue;
3637
3638 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3639 continue;
3640
3641 conn_num++;
3642
3643 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3644 struct sk_buff *skb;
3645
3646 if (skb_queue_empty(&tmp->data_q))
3647 continue;
3648
3649 skb = skb_peek(&tmp->data_q);
3650 if (skb->priority < cur_prio)
3651 continue;
3652
3653 if (skb->priority > cur_prio) {
3654 num = 0;
3655 min = ~0;
3656 cur_prio = skb->priority;
3657 }
3658
3659 num++;
3660
3661 if (conn->sent < min) {
3662 min = conn->sent;
3663 chan = tmp;
3664 }
3665 }
3666
3667 if (hci_conn_num(hdev, type) == conn_num)
3668 break;
3669 }
3670
3671 rcu_read_unlock();
3672
3673 if (!chan)
3674 return NULL;
3675
3676 switch (chan->conn->type) {
3677 case ACL_LINK:
3678 cnt = hdev->acl_cnt;
3679 break;
3680 case AMP_LINK:
3681 cnt = hdev->block_cnt;
3682 break;
3683 case SCO_LINK:
3684 case ESCO_LINK:
3685 cnt = hdev->sco_cnt;
3686 break;
3687 case LE_LINK:
3688 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3689 break;
3690 default:
3691 cnt = 0;
3692 BT_ERR("Unknown link type");
3693 }
3694
3695 q = cnt / num;
3696 *quote = q ? q : 1;
3697 BT_DBG("chan %p quote %d", chan, *quote);
3698 return chan;
3699 }
3700
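/* After a scheduling round, promote channels that sent nothing but still
 * have queued data to just below HCI_PRIO_MAX so they are not starved by
 * higher priority traffic; channels that did send get their per-round
 * counter reset.
 */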
3701 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3702 {
3703 struct hci_conn_hash *h = &hdev->conn_hash;
3704 struct hci_conn *conn;
3705 int num = 0;
3706
3707 BT_DBG("%s", hdev->name);
3708
3709 rcu_read_lock();
3710
3711 list_for_each_entry_rcu(conn, &h->list, list) {
3712 struct hci_chan *chan;
3713
3714 if (conn->type != type)
3715 continue;
3716
3717 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3718 continue;
3719
3720 num++;
3721
3722 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3723 struct sk_buff *skb;
3724
3725 if (chan->sent) {
3726 chan->sent = 0;
3727 continue;
3728 }
3729
3730 if (skb_queue_empty(&chan->data_q))
3731 continue;
3732
3733 skb = skb_peek(&chan->data_q);
3734 if (skb->priority >= HCI_PRIO_MAX - 1)
3735 continue;
3736
3737 skb->priority = HCI_PRIO_MAX - 1;
3738
3739 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3740 skb->priority);
3741 }
3742
3743 if (hci_conn_num(hdev, type) == num)
3744 break;
3745 }
3746
3747 rcu_read_unlock();
3748
3749 }
3750
3751 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3752 {
3753 /* Calculate count of blocks used by this packet */
3754 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3755 }
3756
3757 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3758 {
3759 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3760 /* ACL tx timeout must be longer than maximum
3761 * link supervision timeout (40.9 seconds) */
3762 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3763 HCI_ACL_TX_TIMEOUT))
3764 hci_link_tx_to(hdev, ACL_LINK);
3765 }
3766 }
3767
3768 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3769 {
3770 unsigned int cnt = hdev->acl_cnt;
3771 struct hci_chan *chan;
3772 struct sk_buff *skb;
3773 int quote;
3774
3775 __check_timeout(hdev, cnt);
3776
3777 while (hdev->acl_cnt &&
3778 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3779 u32 priority = (skb_peek(&chan->data_q))->priority;
3780 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3781 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3782 skb->len, skb->priority);
3783
3784 /* Stop if priority has changed */
3785 if (skb->priority < priority)
3786 break;
3787
3788 skb = skb_dequeue(&chan->data_q);
3789
3790 hci_conn_enter_active_mode(chan->conn,
3791 bt_cb(skb)->force_active);
3792
3793 hci_send_frame(hdev, skb);
3794 hdev->acl_last_tx = jiffies;
3795
3796 hdev->acl_cnt--;
3797 chan->sent++;
3798 chan->conn->sent++;
3799 }
3800 }
3801
3802 if (cnt != hdev->acl_cnt)
3803 hci_prio_recalculate(hdev, ACL_LINK);
3804 }
3805
3806 static void hci_sched_acl_blk(struct hci_dev *hdev)
3807 {
3808 unsigned int cnt = hdev->block_cnt;
3809 struct hci_chan *chan;
3810 struct sk_buff *skb;
3811 int quote;
3812 u8 type;
3813
3814 __check_timeout(hdev, cnt);
3815
3816 BT_DBG("%s", hdev->name);
3817
3818 if (hdev->dev_type == HCI_AMP)
3819 type = AMP_LINK;
3820 else
3821 type = ACL_LINK;
3822
3823 while (hdev->block_cnt > 0 &&
3824 (chan = hci_chan_sent(hdev, type, &quote))) {
3825 u32 priority = (skb_peek(&chan->data_q))->priority;
3826 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3827 int blocks;
3828
3829 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3830 skb->len, skb->priority);
3831
3832 /* Stop if priority has changed */
3833 if (skb->priority < priority)
3834 break;
3835
3836 skb = skb_dequeue(&chan->data_q);
3837
3838 blocks = __get_blocks(hdev, skb);
3839 if (blocks > hdev->block_cnt)
3840 return;
3841
3842 hci_conn_enter_active_mode(chan->conn,
3843 bt_cb(skb)->force_active);
3844
3845 hci_send_frame(hdev, skb);
3846 hdev->acl_last_tx = jiffies;
3847
3848 hdev->block_cnt -= blocks;
3849 quote -= blocks;
3850
3851 chan->sent += blocks;
3852 chan->conn->sent += blocks;
3853 }
3854 }
3855
3856 if (cnt != hdev->block_cnt)
3857 hci_prio_recalculate(hdev, type);
3858 }
3859
3860 static void hci_sched_acl(struct hci_dev *hdev)
3861 {
3862 BT_DBG("%s", hdev->name);
3863
3864 /* No ACL link over BR/EDR controller */
3865 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3866 return;
3867
3868 /* No AMP link over AMP controller */
3869 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3870 return;
3871
3872 switch (hdev->flow_ctl_mode) {
3873 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3874 hci_sched_acl_pkt(hdev);
3875 break;
3876
3877 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3878 hci_sched_acl_blk(hdev);
3879 break;
3880 }
3881 }
3882
3883 /* Schedule SCO */
3884 static void hci_sched_sco(struct hci_dev *hdev)
3885 {
3886 struct hci_conn *conn;
3887 struct sk_buff *skb;
3888 int quote;
3889
3890 BT_DBG("%s", hdev->name);
3891
3892 if (!hci_conn_num(hdev, SCO_LINK))
3893 return;
3894
3895 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3896 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3897 BT_DBG("skb %p len %d", skb, skb->len);
3898 hci_send_frame(hdev, skb);
3899
3900 conn->sent++;
3901 if (conn->sent == ~0)
3902 conn->sent = 0;
3903 }
3904 }
3905 }
3906
3907 static void hci_sched_esco(struct hci_dev *hdev)
3908 {
3909 struct hci_conn *conn;
3910 struct sk_buff *skb;
3911 int quote;
3912
3913 BT_DBG("%s", hdev->name);
3914
3915 if (!hci_conn_num(hdev, ESCO_LINK))
3916 return;
3917
3918 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3919 						     &quote))) {
3920 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3921 BT_DBG("skb %p len %d", skb, skb->len);
3922 hci_send_frame(hdev, skb);
3923
3924 conn->sent++;
3925 if (conn->sent == ~0)
3926 conn->sent = 0;
3927 }
3928 }
3929 }
3930
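/* Schedule LE: uses the dedicated LE buffer count when the controller
 * advertises separate LE buffers (le_pkts), otherwise falls back to the
 * shared ACL buffer count.
 */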
3931 static void hci_sched_le(struct hci_dev *hdev)
3932 {
3933 struct hci_chan *chan;
3934 struct sk_buff *skb;
3935 int quote, cnt, tmp;
3936
3937 BT_DBG("%s", hdev->name);
3938
3939 if (!hci_conn_num(hdev, LE_LINK))
3940 return;
3941
3942 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3943 /* LE tx timeout must be longer than maximum
3944 * link supervision timeout (40.9 seconds) */
3945 if (!hdev->le_cnt && hdev->le_pkts &&
3946 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3947 hci_link_tx_to(hdev, LE_LINK);
3948 }
3949
3950 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3951 tmp = cnt;
3952 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3953 u32 priority = (skb_peek(&chan->data_q))->priority;
3954 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3955 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3956 skb->len, skb->priority);
3957
3958 /* Stop if priority has changed */
3959 if (skb->priority < priority)
3960 break;
3961
3962 skb = skb_dequeue(&chan->data_q);
3963
3964 hci_send_frame(hdev, skb);
3965 hdev->le_last_tx = jiffies;
3966
3967 cnt--;
3968 chan->sent++;
3969 chan->conn->sent++;
3970 }
3971 }
3972
3973 if (hdev->le_pkts)
3974 hdev->le_cnt = cnt;
3975 else
3976 hdev->acl_cnt = cnt;
3977
3978 if (cnt != tmp)
3979 hci_prio_recalculate(hdev, LE_LINK);
3980 }
3981
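/* TX work handler: runs the per-link-type schedulers (unless the device
 * is bound to a user channel) and then flushes the raw queue.
 */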
3982 static void hci_tx_work(struct work_struct *work)
3983 {
3984 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3985 struct sk_buff *skb;
3986
3987 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3988 hdev->sco_cnt, hdev->le_cnt);
3989
3990 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3991 /* Schedule queues and send stuff to HCI driver */
3992 hci_sched_acl(hdev);
3993 hci_sched_sco(hdev);
3994 hci_sched_esco(hdev);
3995 hci_sched_le(hdev);
3996 }
3997
3998 /* Send next queued raw (unknown type) packet */
3999 while ((skb = skb_dequeue(&hdev->raw_q)))
4000 hci_send_frame(hdev, skb);
4001 }
4002
4003 /* ----- HCI RX task (incoming data processing) ----- */
4004
4005 /* ACL data packet */
4006 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4007 {
4008 struct hci_acl_hdr *hdr = (void *) skb->data;
4009 struct hci_conn *conn;
4010 __u16 handle, flags;
4011
4012 skb_pull(skb, HCI_ACL_HDR_SIZE);
4013
4014 handle = __le16_to_cpu(hdr->handle);
4015 flags = hci_flags(handle);
4016 handle = hci_handle(handle);
4017
4018 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4019 handle, flags);
4020
4021 hdev->stat.acl_rx++;
4022
4023 hci_dev_lock(hdev);
4024 conn = hci_conn_hash_lookup_handle(hdev, handle);
4025 hci_dev_unlock(hdev);
4026
4027 if (conn) {
4028 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4029
4030 /* Send to upper protocol */
4031 l2cap_recv_acldata(conn, skb, flags);
4032 return;
4033 } else {
4034 BT_ERR("%s ACL packet for unknown connection handle %d",
4035 hdev->name, handle);
4036 }
4037
4038 kfree_skb(skb);
4039 }
4040
4041 /* SCO data packet */
4042 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4043 {
4044 struct hci_sco_hdr *hdr = (void *) skb->data;
4045 struct hci_conn *conn;
4046 __u16 handle;
4047
4048 skb_pull(skb, HCI_SCO_HDR_SIZE);
4049
4050 handle = __le16_to_cpu(hdr->handle);
4051
4052 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4053
4054 hdev->stat.sco_rx++;
4055
4056 hci_dev_lock(hdev);
4057 conn = hci_conn_hash_lookup_handle(hdev, handle);
4058 hci_dev_unlock(hdev);
4059
4060 if (conn) {
4061 /* Send to upper protocol */
4062 sco_recv_scodata(conn, skb);
4063 return;
4064 } else {
4065 BT_ERR("%s SCO packet for unknown connection handle %d",
4066 hdev->name, handle);
4067 }
4068
4069 kfree_skb(skb);
4070 }
4071
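/* A request is complete once the command queue is empty or the next
 * queued command starts a new request.
 */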
4072 static bool hci_req_is_complete(struct hci_dev *hdev)
4073 {
4074 struct sk_buff *skb;
4075
4076 skb = skb_peek(&hdev->cmd_q);
4077 if (!skb)
4078 return true;
4079
4080 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4081 }
4082
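/* Requeue the last sent command (unless it was a reset) so that it is
 * transmitted again.
 */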
4083 static void hci_resend_last(struct hci_dev *hdev)
4084 {
4085 struct hci_command_hdr *sent;
4086 struct sk_buff *skb;
4087 u16 opcode;
4088
4089 if (!hdev->sent_cmd)
4090 return;
4091
4092 sent = (void *) hdev->sent_cmd->data;
4093 opcode = __le16_to_cpu(sent->opcode);
4094 if (opcode == HCI_OP_RESET)
4095 return;
4096
4097 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4098 if (!skb)
4099 return;
4100
4101 skb_queue_head(&hdev->cmd_q, skb);
4102 queue_work(hdev->workqueue, &hdev->cmd_work);
4103 }
4104
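/* Find the completion callbacks for the request that the completed
 * command belongs to and, if the request was aborted, flush its
 * remaining commands from the command queue.
 */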
4105 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4106 hci_req_complete_t *req_complete,
4107 hci_req_complete_skb_t *req_complete_skb)
4108 {
4109 struct sk_buff *skb;
4110 unsigned long flags;
4111
4112 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4113
4114 /* If the completed command doesn't match the last one that was
4115 * sent we need to do special handling of it.
4116 */
4117 if (!hci_sent_cmd_data(hdev, opcode)) {
4118 /* Some CSR based controllers generate a spontaneous
4119 * reset complete event during init and any pending
4120 * command will never be completed. In such a case we
4121 * need to resend whatever was the last sent
4122 * command.
4123 */
4124 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4125 hci_resend_last(hdev);
4126
4127 return;
4128 }
4129
4130 /* If the command succeeded and there's still more commands in
4131 * this request the request is not yet complete.
4132 */
4133 if (!status && !hci_req_is_complete(hdev))
4134 return;
4135
4136 /* If this was the last command in a request the complete
4137 * callback would be found in hdev->sent_cmd instead of the
4138 * command queue (hdev->cmd_q).
4139 */
4140 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4141 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4142 return;
4143 }
4144
4145 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4146 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4147 return;
4148 }
4149
4150 /* Remove all pending commands belonging to this request */
4151 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4152 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4153 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4154 __skb_queue_head(&hdev->cmd_q, skb);
4155 break;
4156 }
4157
4158 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4159 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4160 else
4161 *req_complete = bt_cb(skb)->hci.req_complete;
4162 kfree_skb(skb);
4163 }
4164 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4165 }
4166
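/* RX work handler: passes every received frame to the monitor and raw
 * sockets, then dispatches events, ACL and SCO data to their handlers.
 */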
4167 static void hci_rx_work(struct work_struct *work)
4168 {
4169 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4170 struct sk_buff *skb;
4171
4172 BT_DBG("%s", hdev->name);
4173
4174 while ((skb = skb_dequeue(&hdev->rx_q))) {
4175 /* Send copy to monitor */
4176 hci_send_to_monitor(hdev, skb);
4177
4178 if (atomic_read(&hdev->promisc)) {
4179 /* Send copy to the sockets */
4180 hci_send_to_sock(hdev, skb);
4181 }
4182
4183 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4184 kfree_skb(skb);
4185 continue;
4186 }
4187
4188 if (test_bit(HCI_INIT, &hdev->flags)) {
4189 			/* Don't process data packets in this state. */
4190 switch (hci_skb_pkt_type(skb)) {
4191 case HCI_ACLDATA_PKT:
4192 case HCI_SCODATA_PKT:
4193 kfree_skb(skb);
4194 continue;
4195 }
4196 }
4197
4198 /* Process frame */
4199 switch (hci_skb_pkt_type(skb)) {
4200 case HCI_EVENT_PKT:
4201 BT_DBG("%s Event packet", hdev->name);
4202 hci_event_packet(hdev, skb);
4203 break;
4204
4205 case HCI_ACLDATA_PKT:
4206 BT_DBG("%s ACL data packet", hdev->name);
4207 hci_acldata_packet(hdev, skb);
4208 break;
4209
4210 case HCI_SCODATA_PKT:
4211 BT_DBG("%s SCO data packet", hdev->name);
4212 hci_scodata_packet(hdev, skb);
4213 break;
4214
4215 default:
4216 kfree_skb(skb);
4217 break;
4218 }
4219 }
4220 }
4221
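/* Command work handler: sends the next queued HCI command when the
 * controller has command credit, keeps a clone in sent_cmd for
 * completion matching and arms the command timeout.
 */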
4222 static void hci_cmd_work(struct work_struct *work)
4223 {
4224 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4225 struct sk_buff *skb;
4226
4227 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4228 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4229
4230 /* Send queued commands */
4231 if (atomic_read(&hdev->cmd_cnt)) {
4232 skb = skb_dequeue(&hdev->cmd_q);
4233 if (!skb)
4234 return;
4235
4236 kfree_skb(hdev->sent_cmd);
4237
4238 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4239 if (hdev->sent_cmd) {
4240 atomic_dec(&hdev->cmd_cnt);
4241 hci_send_frame(hdev, skb);
4242 if (test_bit(HCI_RESET, &hdev->flags))
4243 cancel_delayed_work(&hdev->cmd_timer);
4244 else
4245 schedule_delayed_work(&hdev->cmd_timer,
4246 HCI_CMD_TIMEOUT);
4247 } else {
4248 skb_queue_head(&hdev->cmd_q, skb);
4249 queue_work(hdev->workqueue, &hdev->cmd_work);
4250 }
4251 }
4252 }
4253