1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
41
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47
48 static void hci_rx_work(struct work_struct *work);
49 static void hci_cmd_work(struct work_struct *work);
50 static void hci_tx_work(struct work_struct *work);
51
52 /* HCI device list */
53 LIST_HEAD(hci_dev_list);
54 DEFINE_RWLOCK(hci_dev_list_lock);
55
56 /* HCI callback list */
57 LIST_HEAD(hci_cb_list);
58 DEFINE_MUTEX(hci_cb_list_lock);
59
60 /* HCI ID Numbering */
61 static DEFINE_IDA(hci_index_ida);
62
63 /* ---- HCI debugfs entries ---- */
64
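/* These entries live in each controller's debugfs directory, typically
 * /sys/kernel/debug/bluetooth/hci<N>/ when debugfs is mounted in the
 * usual place. Usage sketch (path assumed):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * kstrtobool_from_user() accepts Y/N, y/n and 1/0 on write.
 */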
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67 {
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
71 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75 }
76
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 bool enable;
83 int err;
84
85 if (!test_bit(HCI_UP, &hdev->flags))
86 return -ENETDOWN;
87
88 err = kstrtobool_from_user(user_buf, count, &enable);
89 if (err)
90 return err;
91
92 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
93 return -EALREADY;
94
95 hci_req_sync_lock(hdev);
96 if (enable)
97 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
98 HCI_CMD_TIMEOUT);
99 else
100 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101 HCI_CMD_TIMEOUT);
102 hci_req_sync_unlock(hdev);
103
104 if (IS_ERR(skb))
105 return PTR_ERR(skb);
106
107 kfree_skb(skb);
108
109 hci_dev_change_flag(hdev, HCI_DUT_MODE);
110
111 return count;
112 }
113
114 static const struct file_operations dut_mode_fops = {
115 .open = simple_open,
116 .read = dut_mode_read,
117 .write = dut_mode_write,
118 .llseek = default_llseek,
119 };
120
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos)
123 {
124 struct hci_dev *hdev = file->private_data;
125 char buf[3];
126
127 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
128 buf[1] = '\n';
129 buf[2] = '\0';
130 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131 }
132
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134 size_t count, loff_t *ppos)
135 {
136 struct hci_dev *hdev = file->private_data;
137 bool enable;
138 int err;
139
140 err = kstrtobool_from_user(user_buf, count, &enable);
141 if (err)
142 return err;
143
144 /* When the diagnostic flags are not persistent and the transport
145 * is not active or in user channel operation, then there is no need
146 * for the vendor callback. Instead just store the desired value and
147 * the setting will be programmed when the controller gets powered on.
148 */
149 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150 (!test_bit(HCI_RUNNING, &hdev->flags) ||
151 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
152 goto done;
153
154 hci_req_sync_lock(hdev);
155 err = hdev->set_diag(hdev, enable);
156 hci_req_sync_unlock(hdev);
157
158 if (err < 0)
159 return err;
160
161 done:
162 if (enable)
163 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
164 else
165 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
166
167 return count;
168 }
169
170 static const struct file_operations vendor_diag_fops = {
171 .open = simple_open,
172 .read = vendor_diag_read,
173 .write = vendor_diag_write,
174 .llseek = default_llseek,
175 };
176
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
178 {
179 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180 &dut_mode_fops);
181
182 if (hdev->set_diag)
183 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
184 &vendor_diag_fops);
185 }
186
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
188 {
189 BT_DBG("%s %ld", req->hdev->name, opt);
190
191 /* Reset device */
192 set_bit(HCI_RESET, &req->hdev->flags);
193 hci_req_add(req, HCI_OP_RESET, 0, NULL);
194 return 0;
195 }
196
197 static void bredr_init(struct hci_request *req)
198 {
199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200
201 /* Read Local Supported Features */
202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
203
204 /* Read Local Version */
205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
206
207 /* Read BD Address */
208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
209 }
210
211 static void amp_init1(struct hci_request *req)
212 {
213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
214
215 /* Read Local Version */
216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217
218 /* Read Local Supported Commands */
219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220
221 /* Read Local AMP Info */
222 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
223
224 /* Read Data Blk size */
225 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
226
227 /* Read Flow Control Mode */
228 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229
230 /* Read Location Data */
231 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
232 }
233
234 static int amp_init2(struct hci_request *req)
235 {
236 /* Read Local Supported Features. Not all AMP controllers
237 * support this so it's placed conditionally in the second
238 * stage init.
239 */
240 if (req->hdev->commands[14] & 0x20)
241 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
242
243 return 0;
244 }
245
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
247 {
248 struct hci_dev *hdev = req->hdev;
249
250 BT_DBG("%s %ld", hdev->name, opt);
251
252 /* Reset */
253 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254 hci_reset_req(req, 0);
255
256 switch (hdev->dev_type) {
257 case HCI_PRIMARY:
258 bredr_init(req);
259 break;
260 case HCI_AMP:
261 amp_init1(req);
262 break;
263 default:
264 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
265 break;
266 }
267
268 return 0;
269 }
270
271 static void bredr_setup(struct hci_request *req)
272 {
273 __le16 param;
274 __u8 flt_type;
275
276 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
277 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
278
279 /* Read Class of Device */
280 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
281
282 /* Read Local Name */
283 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
284
285 /* Read Voice Setting */
286 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
287
288 /* Read Number of Supported IAC */
289 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290
291 /* Read Current IAC LAP */
292 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293
294 /* Clear Event Filters */
295 flt_type = HCI_FLT_CLEAR_ALL;
296 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
297
298 /* Connection accept timeout ~20 secs */
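/* 0x7d00 = 32000 baseband slots of 0.625 ms each, i.e. 20 seconds */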
299 param = cpu_to_le16(0x7d00);
300 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
301 }
302
303 static void le_setup(struct hci_request *req)
304 {
305 struct hci_dev *hdev = req->hdev;
306
307 /* Read LE Buffer Size */
308 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
309
310 /* Read LE Local Supported Features */
311 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
312
313 /* Read LE Supported States */
314 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315
316 /* LE-only controllers have LE implicitly enabled */
317 if (!lmp_bredr_capable(hdev))
318 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
319 }
320
321 static void hci_setup_event_mask(struct hci_request *req)
322 {
323 struct hci_dev *hdev = req->hdev;
324
325 /* The second byte is 0xff instead of 0x9f (two reserved bits
326 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
327 * command otherwise.
328 */
329 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
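/* Each byte of events[] covers eight consecutive bits of the HCI
 * Set Event Mask parameter, so events[n] holds mask bits 8*n..8*n+7
 * (e.g. the 0x10 set in events[0] below is bit 4, Disconnection
 * Complete).
 */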
330
331 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
332 * any event mask for pre 1.2 devices.
333 */
334 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
335 return;
336
337 if (lmp_bredr_capable(hdev)) {
338 events[4] |= 0x01; /* Flow Specification Complete */
339 } else {
340 /* Use a different default for LE-only devices */
341 memset(events, 0, sizeof(events));
342 events[1] |= 0x20; /* Command Complete */
343 events[1] |= 0x40; /* Command Status */
344 events[1] |= 0x80; /* Hardware Error */
345
346 /* If the controller supports the Disconnect command, enable
347 * the corresponding event. In addition enable packet flow
348 * control related events.
349 */
350 if (hdev->commands[0] & 0x20) {
351 events[0] |= 0x10; /* Disconnection Complete */
352 events[2] |= 0x04; /* Number of Completed Packets */
353 events[3] |= 0x02; /* Data Buffer Overflow */
354 }
355
356 /* If the controller supports the Read Remote Version
357 * Information command, enable the corresponding event.
358 */
359 if (hdev->commands[2] & 0x80)
360 events[1] |= 0x08; /* Read Remote Version Information
361 * Complete
362 */
363
364 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365 events[0] |= 0x80; /* Encryption Change */
366 events[5] |= 0x80; /* Encryption Key Refresh Complete */
367 }
368 }
369
370 if (lmp_inq_rssi_capable(hdev) ||
371 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
372 events[4] |= 0x02; /* Inquiry Result with RSSI */
373
374 if (lmp_ext_feat_capable(hdev))
375 events[4] |= 0x04; /* Read Remote Extended Features Complete */
376
377 if (lmp_esco_capable(hdev)) {
378 events[5] |= 0x08; /* Synchronous Connection Complete */
379 events[5] |= 0x10; /* Synchronous Connection Changed */
380 }
381
382 if (lmp_sniffsubr_capable(hdev))
383 events[5] |= 0x20; /* Sniff Subrating */
384
385 if (lmp_pause_enc_capable(hdev))
386 events[5] |= 0x80; /* Encryption Key Refresh Complete */
387
388 if (lmp_ext_inq_capable(hdev))
389 events[5] |= 0x40; /* Extended Inquiry Result */
390
391 if (lmp_no_flush_capable(hdev))
392 events[7] |= 0x01; /* Enhanced Flush Complete */
393
394 if (lmp_lsto_capable(hdev))
395 events[6] |= 0x80; /* Link Supervision Timeout Changed */
396
397 if (lmp_ssp_capable(hdev)) {
398 events[6] |= 0x01; /* IO Capability Request */
399 events[6] |= 0x02; /* IO Capability Response */
400 events[6] |= 0x04; /* User Confirmation Request */
401 events[6] |= 0x08; /* User Passkey Request */
402 events[6] |= 0x10; /* Remote OOB Data Request */
403 events[6] |= 0x20; /* Simple Pairing Complete */
404 events[7] |= 0x04; /* User Passkey Notification */
405 events[7] |= 0x08; /* Keypress Notification */
406 events[7] |= 0x10; /* Remote Host Supported
407 * Features Notification
408 */
409 }
410
411 if (lmp_le_capable(hdev))
412 events[7] |= 0x20; /* LE Meta-Event */
413
414 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
415 }
416
417 static int hci_init2_req(struct hci_request *req, unsigned long opt)
418 {
419 struct hci_dev *hdev = req->hdev;
420
421 if (hdev->dev_type == HCI_AMP)
422 return amp_init2(req);
423
424 if (lmp_bredr_capable(hdev))
425 bredr_setup(req);
426 else
427 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
428
429 if (lmp_le_capable(hdev))
430 le_setup(req);
431
432 /* All Bluetooth 1.2 and later controllers should support the
433 * HCI command for reading the local supported commands.
434 *
435 * Unfortunately some controllers indicate Bluetooth 1.2 support,
436 * but do not have support for this command. If that is the case,
437 * the driver can quirk the behavior and skip reading the local
438 * supported commands.
439 */
440 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
442 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
443
444 if (lmp_ssp_capable(hdev)) {
445 /* When SSP is available, then the host features page
446 * should also be available as well. However some
447 * controllers list the max_page as 0 as long as SSP
448 * has not been enabled. To achieve proper debugging
449 * output, force the minimum max_page to 1 at least.
450 */
451 hdev->max_page = 0x01;
452
453 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
454 u8 mode = 0x01;
455
456 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457 sizeof(mode), &mode);
458 } else {
459 struct hci_cp_write_eir cp;
460
461 memset(hdev->eir, 0, sizeof(hdev->eir));
462 memset(&cp, 0, sizeof(cp));
463
464 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
465 }
466 }
467
468 if (lmp_inq_rssi_capable(hdev) ||
469 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
470 u8 mode;
471
472 /* If Extended Inquiry Result events are supported, then
473 * they are clearly preferred over Inquiry Result with RSSI
474 * events.
475 */
476 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
477
478 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
479 }
480
481 if (lmp_inq_tx_pwr_capable(hdev))
482 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
483
484 if (lmp_ext_feat_capable(hdev)) {
485 struct hci_cp_read_local_ext_features cp;
486
487 cp.page = 0x01;
488 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489 sizeof(cp), &cp);
490 }
491
492 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
493 u8 enable = 1;
494 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
495 &enable);
496 }
497
498 return 0;
499 }
500
501 static void hci_setup_link_policy(struct hci_request *req)
502 {
503 struct hci_dev *hdev = req->hdev;
504 struct hci_cp_write_def_link_policy cp;
505 u16 link_policy = 0;
506
507 if (lmp_rswitch_capable(hdev))
508 link_policy |= HCI_LP_RSWITCH;
509 if (lmp_hold_capable(hdev))
510 link_policy |= HCI_LP_HOLD;
511 if (lmp_sniff_capable(hdev))
512 link_policy |= HCI_LP_SNIFF;
513 if (lmp_park_capable(hdev))
514 link_policy |= HCI_LP_PARK;
515
516 cp.policy = cpu_to_le16(link_policy);
517 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
518 }
519
520 static void hci_set_le_support(struct hci_request *req)
521 {
522 struct hci_dev *hdev = req->hdev;
523 struct hci_cp_write_le_host_supported cp;
524
525 /* LE-only devices do not support explicit enablement */
526 if (!lmp_bredr_capable(hdev))
527 return;
528
529 memset(&cp, 0, sizeof(cp));
530
531 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
532 cp.le = 0x01;
533 cp.simul = 0x00;
534 }
535
536 if (cp.le != lmp_host_le_capable(hdev))
537 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538 &cp);
539 }
540
541 static void hci_set_event_mask_page_2(struct hci_request *req)
542 {
543 struct hci_dev *hdev = req->hdev;
544 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545 bool changed = false;
546
547 /* If Connectionless Slave Broadcast master role is supported
548 * enable all necessary events for it.
549 */
550 if (lmp_csb_master_capable(hdev)) {
551 events[1] |= 0x40; /* Triggered Clock Capture */
552 events[1] |= 0x80; /* Synchronization Train Complete */
553 events[2] |= 0x10; /* Slave Page Response Timeout */
554 events[2] |= 0x20; /* CSB Channel Map Change */
555 changed = true;
556 }
557
558 /* If Connectionless Slave Broadcast slave role is supported
559 * enable all necessary events for it.
560 */
561 if (lmp_csb_slave_capable(hdev)) {
562 events[2] |= 0x01; /* Synchronization Train Received */
563 events[2] |= 0x02; /* CSB Receive */
564 events[2] |= 0x04; /* CSB Timeout */
565 events[2] |= 0x08; /* Truncated Page Complete */
566 changed = true;
567 }
568
569 /* Enable Authenticated Payload Timeout Expired event if supported */
570 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
571 events[2] |= 0x80;
572 changed = true;
573 }
574
575 /* Some Broadcom based controllers indicate support for Set Event
576 * Mask Page 2 command, but then actually do not support it. Since
577 * the default value is all bits set to zero, the command is only
578 * required if the event mask has to be changed. In case no change
579 * to the event mask is needed, skip this command.
580 */
581 if (changed)
582 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583 sizeof(events), events);
584 }
585
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
587 {
588 struct hci_dev *hdev = req->hdev;
589 u8 p;
590
591 hci_setup_event_mask(req);
592
593 if (hdev->commands[6] & 0x20 &&
594 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595 struct hci_cp_read_stored_link_key cp;
596
597 bacpy(&cp.bdaddr, BDADDR_ANY);
598 cp.read_all = 0x01;
599 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600 }
601
602 if (hdev->commands[5] & 0x10)
603 hci_setup_link_policy(req);
604
605 if (hdev->commands[8] & 0x01)
606 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607
608 if (hdev->commands[18] & 0x04 &&
609 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
610 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611
612 /* Some older Broadcom based Bluetooth 1.2 controllers do not
613 * support the Read Page Scan Type command. Check support for
614 * this command in the bit mask of supported commands.
615 */
616 if (hdev->commands[13] & 0x01)
617 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618
619 if (lmp_le_capable(hdev)) {
620 u8 events[8];
621
622 memset(events, 0, sizeof(events));
623
624 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625 events[0] |= 0x10; /* LE Long Term Key Request */
626
627 /* If controller supports the Connection Parameters Request
628 * Link Layer Procedure, enable the corresponding event.
629 */
630 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631 events[0] |= 0x20; /* LE Remote Connection
632 * Parameter Request
633 */
634
635 /* If the controller supports the Data Length Extension
636 * feature, enable the corresponding event.
637 */
638 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639 events[0] |= 0x40; /* LE Data Length Change */
640
641 /* If the controller supports LL Privacy feature, enable
642 * the corresponding event.
643 */
644 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645 events[1] |= 0x02; /* LE Enhanced Connection
646 * Complete
647 */
648
649 /* If the controller supports Extended Scanner Filter
650 * Policies, enable the corresponding event.
651 */
652 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653 events[1] |= 0x04; /* LE Direct Advertising
654 * Report
655 */
656
657 /* If the controller supports Channel Selection Algorithm #2
658 * feature, enable the corresponding event.
659 */
660 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661 events[2] |= 0x08; /* LE Channel Selection
662 * Algorithm
663 */
664
665 /* If the controller supports the LE Set Scan Enable command,
666 * enable the corresponding advertising report event.
667 */
668 if (hdev->commands[26] & 0x08)
669 events[0] |= 0x02; /* LE Advertising Report */
670
671 /* If the controller supports the LE Create Connection
672 * command, enable the corresponding event.
673 */
674 if (hdev->commands[26] & 0x10)
675 events[0] |= 0x01; /* LE Connection Complete */
676
677 /* If the controller supports the LE Connection Update
678 * command, enable the corresponding event.
679 */
680 if (hdev->commands[27] & 0x04)
681 events[0] |= 0x04; /* LE Connection Update
682 * Complete
683 */
684
685 /* If the controller supports the LE Read Remote Used Features
686 * command, enable the corresponding event.
687 */
688 if (hdev->commands[27] & 0x20)
689 events[0] |= 0x08; /* LE Read Remote Used
690 * Features Complete
691 */
692
693 /* If the controller supports the LE Read Local P-256
694 * Public Key command, enable the corresponding event.
695 */
696 if (hdev->commands[34] & 0x02)
697 events[0] |= 0x80; /* LE Read Local P-256
698 * Public Key Complete
699 */
700
701 /* If the controller supports the LE Generate DHKey
702 * command, enable the corresponding event.
703 */
704 if (hdev->commands[34] & 0x04)
705 events[1] |= 0x01; /* LE Generate DHKey Complete */
706
707 /* If the controller supports the LE Set Default PHY or
708 * LE Set PHY commands, enable the corresponding event.
709 */
710 if (hdev->commands[35] & (0x20 | 0x40))
711 events[1] |= 0x08; /* LE PHY Update Complete */
712
713 /* If the controller supports LE Set Extended Scan Parameters
714 * and LE Set Extended Scan Enable commands, enable the
715 * corresponding event.
716 */
717 if (use_ext_scan(hdev))
718 events[1] |= 0x10; /* LE Extended Advertising
719 * Report
720 */
721
722 /* If the controller supports the LE Extended Advertising
723 * command, enable the corresponding event.
724 */
725 if (ext_adv_capable(hdev))
726 events[2] |= 0x02; /* LE Advertising Set
727 * Terminated
728 */
729
730 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731 events);
732
733 /* Read LE Advertising Channel TX Power */
734 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735 /* HCI TS spec forbids mixing of legacy and extended
736 * advertising commands wherein READ_ADV_TX_POWER is
737 * also included. So do not call it if extended adv
738 * is supported otherwise controller will return
739 * COMMAND_DISALLOWED for extended commands.
740 */
741 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742 }
743
744 if (hdev->commands[26] & 0x40) {
745 /* Read LE Accept List Size */
746 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
747 0, NULL);
748 }
749
750 if (hdev->commands[26] & 0x80) {
751 /* Clear LE Accept List */
752 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
753 }
754
755 if (hdev->commands[34] & 0x40) {
756 /* Read LE Resolving List Size */
757 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
758 0, NULL);
759 }
760
761 if (hdev->commands[34] & 0x20) {
762 /* Clear LE Resolving List */
763 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
764 }
765
766 if (hdev->commands[35] & 0x04) {
767 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
768
769 /* Set RPA timeout */
770 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
771 &rpa_timeout);
772 }
773
774 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
775 /* Read LE Maximum Data Length */
776 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
777
778 /* Read LE Suggested Default Data Length */
779 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
780 }
781
782 if (ext_adv_capable(hdev)) {
783 /* Read LE Number of Supported Advertising Sets */
784 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
785 0, NULL);
786 }
787
788 hci_set_le_support(req);
789 }
790
791 /* Read features beyond page 1 if available */
792 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
793 struct hci_cp_read_local_ext_features cp;
794
795 cp.page = p;
796 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
797 sizeof(cp), &cp);
798 }
799
800 return 0;
801 }
802
803 static int hci_init4_req(struct hci_request *req, unsigned long opt)
804 {
805 struct hci_dev *hdev = req->hdev;
806
807 /* Some Broadcom based Bluetooth controllers do not support the
808 * Delete Stored Link Key command. They are clearly indicating its
809 * absence in the bit mask of supported commands.
810 *
811 * Check the supported commands and only if the command is marked
812 * as supported send it. If not supported assume that the controller
813 * does not have actual support for stored link keys which makes this
814 * command redundant anyway.
815 *
816 * Some controllers indicate that they support handling deleting
817 * stored link keys, but they don't. The quirk lets a driver
818 * just disable this command.
819 */
820 if (hdev->commands[6] & 0x80 &&
821 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
822 struct hci_cp_delete_stored_link_key cp;
823
824 bacpy(&cp.bdaddr, BDADDR_ANY);
825 cp.delete_all = 0x01;
826 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
827 sizeof(cp), &cp);
828 }
829
830 /* Set event mask page 2 if the HCI command for it is supported */
831 if (hdev->commands[22] & 0x04)
832 hci_set_event_mask_page_2(req);
833
834 /* Read local codec list if the HCI command is supported */
835 if (hdev->commands[29] & 0x20)
836 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
837
838 /* Read local pairing options if the HCI command is supported */
839 if (hdev->commands[41] & 0x08)
840 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
841
842 /* Get MWS transport configuration if the HCI command is supported */
843 if (hdev->commands[30] & 0x08)
844 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
845
846 /* Check for Synchronization Train support */
847 if (lmp_sync_train_capable(hdev))
848 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
849
850 /* Enable Secure Connections if supported and configured */
851 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
852 bredr_sc_enabled(hdev)) {
853 u8 support = 0x01;
854
855 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
856 sizeof(support), &support);
857 }
858
859 /* Set erroneous data reporting, if supported, to match the wideband
860 * speech setting value
861 */
862 if (hdev->commands[18] & 0x08 &&
863 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
864 bool enabled = hci_dev_test_flag(hdev,
865 HCI_WIDEBAND_SPEECH_ENABLED);
866
867 if (enabled !=
868 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
869 struct hci_cp_write_def_err_data_reporting cp;
870
871 cp.err_data_reporting = enabled ?
872 ERR_DATA_REPORTING_ENABLED :
873 ERR_DATA_REPORTING_DISABLED;
874
875 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
876 sizeof(cp), &cp);
877 }
878 }
879
880 /* Set Suggested Default Data Length to maximum if supported */
881 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
882 struct hci_cp_le_write_def_data_len cp;
883
884 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
885 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
886 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
887 }
888
889 /* Set Default PHY parameters if command is supported */
890 if (hdev->commands[35] & 0x20) {
891 struct hci_cp_le_set_default_phy cp;
892
893 cp.all_phys = 0x00;
894 cp.tx_phys = hdev->le_tx_def_phys;
895 cp.rx_phys = hdev->le_rx_def_phys;
896
897 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
898 }
899
900 return 0;
901 }
902
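/* Controller initialization runs as a series of synchronous request
 * stages: init1 performs the reset and the basic capability reads,
 * init2 programs the event mask and SSP/EIR state, init3 sets up link
 * policy plus the LE event mask and lists, and init4 covers stored
 * link keys, codecs, data length and the default PHY. AMP controllers
 * only run the first two stages.
 */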
903 static int __hci_init(struct hci_dev *hdev)
904 {
905 int err;
906
907 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
908 if (err < 0)
909 return err;
910
911 if (hci_dev_test_flag(hdev, HCI_SETUP))
912 hci_debugfs_create_basic(hdev);
913
914 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
915 if (err < 0)
916 return err;
917
918 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
919 * BR/EDR/LE type controllers. AMP controllers only need the
920 * first two stages of init.
921 */
922 if (hdev->dev_type != HCI_PRIMARY)
923 return 0;
924
925 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
926 if (err < 0)
927 return err;
928
929 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
930 if (err < 0)
931 return err;
932
933 /* This function is only called when the controller is actually in
934 * configured state. When the controller is marked as unconfigured,
935 * this initialization procedure is not run.
936 *
937 * It means that it is possible that a controller runs through its
938 * setup phase and then discovers missing settings. If that is the
939 * case, then this function will not be called. It then will only
940 * be called during the config phase.
941 *
942 * So only when in setup phase or config phase, create the debugfs
943 * entries and register the SMP channels.
944 */
945 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
946 !hci_dev_test_flag(hdev, HCI_CONFIG))
947 return 0;
948
949 hci_debugfs_create_common(hdev);
950
951 if (lmp_bredr_capable(hdev))
952 hci_debugfs_create_bredr(hdev);
953
954 if (lmp_le_capable(hdev))
955 hci_debugfs_create_le(hdev);
956
957 return 0;
958 }
959
960 static int hci_init0_req(struct hci_request *req, unsigned long opt)
961 {
962 struct hci_dev *hdev = req->hdev;
963
964 BT_DBG("%s %ld", hdev->name, opt);
965
966 /* Reset */
967 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
968 hci_reset_req(req, 0);
969
970 /* Read Local Version */
971 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
972
973 /* Read BD Address */
974 if (hdev->set_bdaddr)
975 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
976
977 return 0;
978 }
979
980 static int __hci_unconf_init(struct hci_dev *hdev)
981 {
982 int err;
983
984 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
985 return 0;
986
987 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
988 if (err < 0)
989 return err;
990
991 if (hci_dev_test_flag(hdev, HCI_SETUP))
992 hci_debugfs_create_basic(hdev);
993
994 return 0;
995 }
996
997 static int hci_scan_req(struct hci_request *req, unsigned long opt)
998 {
999 __u8 scan = opt;
1000
1001 BT_DBG("%s %x", req->hdev->name, scan);
1002
1003 /* Inquiry and Page scans */
1004 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1005 return 0;
1006 }
1007
1008 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1009 {
1010 __u8 auth = opt;
1011
1012 BT_DBG("%s %x", req->hdev->name, auth);
1013
1014 /* Authentication */
1015 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1016 return 0;
1017 }
1018
1019 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1020 {
1021 __u8 encrypt = opt;
1022
1023 BT_DBG("%s %x", req->hdev->name, encrypt);
1024
1025 /* Encryption */
1026 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1027 return 0;
1028 }
1029
1030 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1031 {
1032 __le16 policy = cpu_to_le16(opt);
1033
1034 BT_DBG("%s %x", req->hdev->name, policy);
1035
1036 /* Default link policy */
1037 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1038 return 0;
1039 }
1040
1041 /* Get HCI device by index.
1042 * Device is held on return. */
1043 static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
1044 {
1045 struct hci_dev *hdev = NULL, *d;
1046
1047 BT_DBG("%d", index);
1048
1049 if (index < 0)
1050 return NULL;
1051
1052 read_lock(&hci_dev_list_lock);
1053 list_for_each_entry(d, &hci_dev_list, list) {
1054 if (d->id == index) {
1055 hdev = hci_dev_hold(d);
1056 if (srcu_index)
1057 *srcu_index = srcu_read_lock(&d->srcu);
1058 break;
1059 }
1060 }
1061 read_unlock(&hci_dev_list_lock);
1062 return hdev;
1063 }
1064
1065 struct hci_dev *hci_dev_get(int index)
1066 {
1067 return __hci_dev_get(index, NULL);
1068 }
1069
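/* hci_dev_get_srcu()/hci_dev_put_srcu() pair the device reference with
 * an SRCU read-side critical section on hdev->srcu, so that a sleeping
 * caller such as hci_dev_reset() is protected against the device being
 * unregistered underneath it.
 */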
1070 static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
1071 {
1072 return __hci_dev_get(index, srcu_index);
1073 }
1074
1075 static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
1076 {
1077 srcu_read_unlock(&hdev->srcu, srcu_index);
1078 hci_dev_put(hdev);
1079 }
1080
1081 /* ---- Inquiry support ---- */
1082
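/* Discovery progresses STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING) -> STOPPING -> STOPPED; only FINDING and
 * RESOLVING count as actively discovering.
 */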
1083 bool hci_discovery_active(struct hci_dev *hdev)
1084 {
1085 struct discovery_state *discov = &hdev->discovery;
1086
1087 switch (discov->state) {
1088 case DISCOVERY_FINDING:
1089 case DISCOVERY_RESOLVING:
1090 return true;
1091
1092 default:
1093 return false;
1094 }
1095 }
1096
1097 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1098 {
1099 int old_state = hdev->discovery.state;
1100
1101 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1102
1103 if (old_state == state)
1104 return;
1105
1106 hdev->discovery.state = state;
1107
1108 switch (state) {
1109 case DISCOVERY_STOPPED:
1110 hci_update_background_scan(hdev);
1111
1112 if (old_state != DISCOVERY_STARTING)
1113 mgmt_discovering(hdev, 0);
1114 break;
1115 case DISCOVERY_STARTING:
1116 break;
1117 case DISCOVERY_FINDING:
1118 mgmt_discovering(hdev, 1);
1119 break;
1120 case DISCOVERY_RESOLVING:
1121 break;
1122 case DISCOVERY_STOPPING:
1123 break;
1124 }
1125 }
1126
1127 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1128 {
1129 struct discovery_state *cache = &hdev->discovery;
1130 struct inquiry_entry *p, *n;
1131
1132 list_for_each_entry_safe(p, n, &cache->all, all) {
1133 list_del(&p->all);
1134 kfree(p);
1135 }
1136
1137 INIT_LIST_HEAD(&cache->unknown);
1138 INIT_LIST_HEAD(&cache->resolve);
1139 }
1140
1141 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr)
1143 {
1144 struct discovery_state *cache = &hdev->discovery;
1145 struct inquiry_entry *e;
1146
1147 BT_DBG("cache %p, %pMR", cache, bdaddr);
1148
1149 list_for_each_entry(e, &cache->all, all) {
1150 if (!bacmp(&e->data.bdaddr, bdaddr))
1151 return e;
1152 }
1153
1154 return NULL;
1155 }
1156
1157 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1158 bdaddr_t *bdaddr)
1159 {
1160 struct discovery_state *cache = &hdev->discovery;
1161 struct inquiry_entry *e;
1162
1163 BT_DBG("cache %p, %pMR", cache, bdaddr);
1164
1165 list_for_each_entry(e, &cache->unknown, list) {
1166 if (!bacmp(&e->data.bdaddr, bdaddr))
1167 return e;
1168 }
1169
1170 return NULL;
1171 }
1172
1173 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1174 bdaddr_t *bdaddr,
1175 int state)
1176 {
1177 struct discovery_state *cache = &hdev->discovery;
1178 struct inquiry_entry *e;
1179
1180 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1181
1182 list_for_each_entry(e, &cache->resolve, list) {
1183 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1184 return e;
1185 if (!bacmp(&e->data.bdaddr, bdaddr))
1186 return e;
1187 }
1188
1189 return NULL;
1190 }
1191
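/* Re-insert the entry so that the resolve list stays ordered by
 * ascending |RSSI|, i.e. the strongest devices get their names resolved
 * first; entries whose name request is already pending are skipped when
 * choosing the insertion point.
 */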
1192 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1193 struct inquiry_entry *ie)
1194 {
1195 struct discovery_state *cache = &hdev->discovery;
1196 struct list_head *pos = &cache->resolve;
1197 struct inquiry_entry *p;
1198
1199 list_del(&ie->list);
1200
1201 list_for_each_entry(p, &cache->resolve, list) {
1202 if (p->name_state != NAME_PENDING &&
1203 abs(p->data.rssi) >= abs(ie->data.rssi))
1204 break;
1205 pos = &p->list;
1206 }
1207
1208 list_add(&ie->list, pos);
1209 }
1210
1211 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1212 bool name_known)
1213 {
1214 struct discovery_state *cache = &hdev->discovery;
1215 struct inquiry_entry *ie;
1216 u32 flags = 0;
1217
1218 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1219
1220 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1221
1222 if (!data->ssp_mode)
1223 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1224
1225 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1226 if (ie) {
1227 if (!ie->data.ssp_mode)
1228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1229
1230 if (ie->name_state == NAME_NEEDED &&
1231 data->rssi != ie->data.rssi) {
1232 ie->data.rssi = data->rssi;
1233 hci_inquiry_cache_update_resolve(hdev, ie);
1234 }
1235
1236 goto update;
1237 }
1238
1239 /* Entry not in the cache. Add new one. */
1240 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1241 if (!ie) {
1242 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1243 goto done;
1244 }
1245
1246 list_add(&ie->all, &cache->all);
1247
1248 if (name_known) {
1249 ie->name_state = NAME_KNOWN;
1250 } else {
1251 ie->name_state = NAME_NOT_KNOWN;
1252 list_add(&ie->list, &cache->unknown);
1253 }
1254
1255 update:
1256 if (name_known && ie->name_state != NAME_KNOWN &&
1257 ie->name_state != NAME_PENDING) {
1258 ie->name_state = NAME_KNOWN;
1259 list_del(&ie->list);
1260 }
1261
1262 memcpy(&ie->data, data, sizeof(*data));
1263 ie->timestamp = jiffies;
1264 cache->timestamp = jiffies;
1265
1266 if (ie->name_state == NAME_NOT_KNOWN)
1267 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1268
1269 done:
1270 return flags;
1271 }
1272
1273 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1274 {
1275 struct discovery_state *cache = &hdev->discovery;
1276 struct inquiry_info *info = (struct inquiry_info *) buf;
1277 struct inquiry_entry *e;
1278 int copied = 0;
1279
1280 list_for_each_entry(e, &cache->all, all) {
1281 struct inquiry_data *data = &e->data;
1282
1283 if (copied >= num)
1284 break;
1285
1286 bacpy(&info->bdaddr, &data->bdaddr);
1287 info->pscan_rep_mode = data->pscan_rep_mode;
1288 info->pscan_period_mode = data->pscan_period_mode;
1289 info->pscan_mode = data->pscan_mode;
1290 memcpy(info->dev_class, data->dev_class, 3);
1291 info->clock_offset = data->clock_offset;
1292
1293 info++;
1294 copied++;
1295 }
1296
1297 BT_DBG("cache %p, copied %d", cache, copied);
1298 return copied;
1299 }
1300
1301 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1302 {
1303 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1304 struct hci_dev *hdev = req->hdev;
1305 struct hci_cp_inquiry cp;
1306
1307 BT_DBG("%s", hdev->name);
1308
1309 if (test_bit(HCI_INQUIRY, &hdev->flags))
1310 return 0;
1311
1312 /* Start Inquiry */
1313 memcpy(&cp.lap, &ir->lap, 3);
1314 cp.length = ir->length;
1315 cp.num_rsp = ir->num_rsp;
1316 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1317
1318 return 0;
1319 }
1320
1321 int hci_inquiry(void __user *arg)
1322 {
1323 __u8 __user *ptr = arg;
1324 struct hci_inquiry_req ir;
1325 struct hci_dev *hdev;
1326 int err = 0, do_inquiry = 0, max_rsp;
1327 long timeo;
1328 __u8 *buf;
1329
1330 if (copy_from_user(&ir, ptr, sizeof(ir)))
1331 return -EFAULT;
1332
1333 hdev = hci_dev_get(ir.dev_id);
1334 if (!hdev)
1335 return -ENODEV;
1336
1337 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1338 err = -EBUSY;
1339 goto done;
1340 }
1341
1342 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1343 err = -EOPNOTSUPP;
1344 goto done;
1345 }
1346
1347 if (hdev->dev_type != HCI_PRIMARY) {
1348 err = -EOPNOTSUPP;
1349 goto done;
1350 }
1351
1352 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1353 err = -EOPNOTSUPP;
1354 goto done;
1355 }
1356
1357 /* Restrict maximum inquiry length to 60 seconds */
1358 if (ir.length > 60) {
1359 err = -EINVAL;
1360 goto done;
1361 }
1362
1363 hci_dev_lock(hdev);
1364 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1365 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1366 hci_inquiry_cache_flush(hdev);
1367 do_inquiry = 1;
1368 }
1369 hci_dev_unlock(hdev);
1370
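/* ir.length is expressed in units of 1.28 seconds; allow a budget of
 * two seconds of wall time per requested unit.
 */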
1371 timeo = ir.length * msecs_to_jiffies(2000);
1372
1373 if (do_inquiry) {
1374 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1375 timeo, NULL);
1376 if (err < 0)
1377 goto done;
1378
1379 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1380 * cleared). If it is interrupted by a signal, return -EINTR.
1381 */
1382 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1383 TASK_INTERRUPTIBLE)) {
1384 err = -EINTR;
1385 goto done;
1386 }
1387 }
1388
1389 /* For an unlimited number of responses we will use a buffer with
1390 * 255 entries
1391 */
1392 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1393
1394 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1395 * then copy it to user space.
1396 */
1397 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1398 if (!buf) {
1399 err = -ENOMEM;
1400 goto done;
1401 }
1402
1403 hci_dev_lock(hdev);
1404 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1405 hci_dev_unlock(hdev);
1406
1407 BT_DBG("num_rsp %d", ir.num_rsp);
1408
1409 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1410 ptr += sizeof(ir);
1411 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1412 ir.num_rsp))
1413 err = -EFAULT;
1414 } else
1415 err = -EFAULT;
1416
1417 kfree(buf);
1418
1419 done:
1420 hci_dev_put(hdev);
1421 return err;
1422 }
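/* Userspace view (illustrative sketch, not part of this file): the
 * HCIINQUIRY ioctl on a raw HCI socket passes a struct hci_inquiry_req
 * followed immediately by room for the inquiry_info results, e.g.:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info info[255];
 *   } req = {
 *           .ir = {
 *                   .dev_id  = 0,
 *                   .flags   = IREQ_CACHE_FLUSH,
 *                   .lap     = { 0x33, 0x8b, 0x9e },  (GIAC)
 *                   .length  = 8,                     (8 * 1.28 s)
 *                   .num_rsp = 0,                     (unlimited)
 *           },
 *   };
 *   ioctl(hci_sock_fd, HCIINQUIRY, &req);
 */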
1423
1424 /**
1425 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1426 * (BD_ADDR) for a HCI device from
1427 * a firmware node property.
1428 * @hdev: The HCI device
1429 *
1430 * Search the firmware node for 'local-bd-address'.
1431 *
1432 * All-zero BD addresses are rejected, because those could be properties
1433 * that exist in the firmware tables, but were not updated by the firmware. For
1434 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1435 */
1436 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1437 {
1438 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1439 bdaddr_t ba;
1440 int ret;
1441
1442 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1443 (u8 *)&ba, sizeof(ba));
1444 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1445 return;
1446
1447 bacpy(&hdev->public_addr, &ba);
1448 }
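/* Devicetree sketch (illustrative; the node placement is hypothetical,
 * only the "local-bd-address" property is consumed here, stored least
 * significant byte first):
 *
 *   bluetooth {
 *           local-bd-address = [ 11 22 33 44 55 66 ];
 *   };
 */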
1449
1450 static int hci_dev_do_open(struct hci_dev *hdev)
1451 {
1452 int ret = 0;
1453
1454 BT_DBG("%s %p", hdev->name, hdev);
1455
1456 hci_req_sync_lock(hdev);
1457
1458 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1459 ret = -ENODEV;
1460 goto done;
1461 }
1462
1463 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1464 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1465 /* Check for rfkill but allow the HCI setup stage to
1466 * proceed (which in itself doesn't cause any RF activity).
1467 */
1468 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1469 ret = -ERFKILL;
1470 goto done;
1471 }
1472
1473 /* Check for valid public address or a configured static
1474 * random address, but let the HCI setup proceed to
1475 * be able to determine if there is a public address
1476 * or not.
1477 *
1478 * In case of user channel usage, it is not important
1479 * if a public address or static random address is
1480 * available.
1481 *
1482 * This check is only valid for BR/EDR controllers
1483 * since AMP controllers do not have an address.
1484 */
1485 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1486 hdev->dev_type == HCI_PRIMARY &&
1487 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1488 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1489 ret = -EADDRNOTAVAIL;
1490 goto done;
1491 }
1492 }
1493
1494 if (test_bit(HCI_UP, &hdev->flags)) {
1495 ret = -EALREADY;
1496 goto done;
1497 }
1498
1499 if (hdev->open(hdev)) {
1500 ret = -EIO;
1501 goto done;
1502 }
1503
1504 set_bit(HCI_RUNNING, &hdev->flags);
1505 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1506
1507 atomic_set(&hdev->cmd_cnt, 1);
1508 set_bit(HCI_INIT, &hdev->flags);
1509
1510 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1511 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1512 bool invalid_bdaddr;
1513
1514 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1515
1516 if (hdev->setup)
1517 ret = hdev->setup(hdev);
1518
1519 /* The transport driver can set the quirk to mark the
1520 * BD_ADDR invalid before creating the HCI device or in
1521 * its setup callback.
1522 */
1523 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1524 &hdev->quirks);
1525
1526 if (ret)
1527 goto setup_failed;
1528
1529 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1530 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1531 hci_dev_get_bd_addr_from_property(hdev);
1532
1533 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1534 hdev->set_bdaddr) {
1535 ret = hdev->set_bdaddr(hdev,
1536 &hdev->public_addr);
1537
1538 /* If setting of the BD_ADDR from the device
1539 * property succeeds, then treat the address
1540 * as valid even if the invalid BD_ADDR
1541 * quirk indicates otherwise.
1542 */
1543 if (!ret)
1544 invalid_bdaddr = false;
1545 }
1546 }
1547
1548 setup_failed:
1549 /* The transport driver can set these quirks before
1550 * creating the HCI device or in its setup callback.
1551 *
1552 * For the invalid BD_ADDR quirk it is possible that
1553 * it becomes a valid address if the bootloader does
1554 * provide it (see above).
1555 *
1556 * In case any of them is set, the controller has to
1557 * start up as unconfigured.
1558 */
1559 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1560 invalid_bdaddr)
1561 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1562
1563 /* For an unconfigured controller it is required to
1564 * read at least the version information provided by
1565 * the Read Local Version Information command.
1566 *
1567 * If the set_bdaddr driver callback is provided, then
1568 * also the original Bluetooth public device address
1569 * will be read using the Read BD Address command.
1570 */
1571 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1572 ret = __hci_unconf_init(hdev);
1573 }
1574
1575 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1576 /* If public address change is configured, ensure that
1577 * the address gets programmed. If the driver does not
1578 * support changing the public address, fail the power
1579 * on procedure.
1580 */
1581 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1582 hdev->set_bdaddr)
1583 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1584 else
1585 ret = -EADDRNOTAVAIL;
1586 }
1587
1588 if (!ret) {
1589 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1590 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1591 ret = __hci_init(hdev);
1592 if (!ret && hdev->post_init)
1593 ret = hdev->post_init(hdev);
1594 }
1595 }
1596
1597 /* If the HCI Reset command is clearing all diagnostic settings,
1598 * then they need to be reprogrammed after the init procedure
1599 * completed.
1600 */
1601 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1602 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1603 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1604 ret = hdev->set_diag(hdev, true);
1605
1606 msft_do_open(hdev);
1607
1608 clear_bit(HCI_INIT, &hdev->flags);
1609
1610 if (!ret) {
1611 hci_dev_hold(hdev);
1612 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1613 hci_adv_instances_set_rpa_expired(hdev, true);
1614 set_bit(HCI_UP, &hdev->flags);
1615 hci_sock_dev_event(hdev, HCI_DEV_UP);
1616 hci_leds_update_powered(hdev, true);
1617 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1618 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1619 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1620 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1621 hci_dev_test_flag(hdev, HCI_MGMT) &&
1622 hdev->dev_type == HCI_PRIMARY) {
1623 ret = __hci_req_hci_power_on(hdev);
1624 mgmt_power_on(hdev, ret);
1625 }
1626 } else {
1627 /* Init failed, cleanup */
1628 flush_work(&hdev->tx_work);
1629
1630 /* Since hci_rx_work() can queue new cmd_work, it should be
1631 * flushed first to avoid an unexpected call of
1632 * hci_cmd_work()
1633 */
1634 flush_work(&hdev->rx_work);
1635 flush_work(&hdev->cmd_work);
1636
1637 skb_queue_purge(&hdev->cmd_q);
1638 skb_queue_purge(&hdev->rx_q);
1639
1640 if (hdev->flush)
1641 hdev->flush(hdev);
1642
1643 if (hdev->sent_cmd) {
1644 cancel_delayed_work_sync(&hdev->cmd_timer);
1645 kfree_skb(hdev->sent_cmd);
1646 hdev->sent_cmd = NULL;
1647 }
1648
1649 clear_bit(HCI_RUNNING, &hdev->flags);
1650 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1651
1652 hdev->close(hdev);
1653 hdev->flags &= BIT(HCI_RAW);
1654 }
1655
1656 done:
1657 hci_req_sync_unlock(hdev);
1658 return ret;
1659 }
1660
1661 /* ---- HCI ioctl helpers ---- */
1662
1663 int hci_dev_open(__u16 dev)
1664 {
1665 struct hci_dev *hdev;
1666 int err;
1667
1668 hdev = hci_dev_get(dev);
1669 if (!hdev)
1670 return -ENODEV;
1671
1672 /* Devices that are marked as unconfigured can only be powered
1673 * up as user channel. Trying to bring them up as normal devices
1674 * will result in a failure. Only user channel operation is
1675 * possible.
1676 *
1677 * When this function is called for a user channel, the flag
1678 * HCI_USER_CHANNEL will be set first before attempting to
1679 * open the device.
1680 */
1681 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1682 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1683 err = -EOPNOTSUPP;
1684 goto done;
1685 }
1686
1687 /* We need to ensure that no other power on/off work is pending
1688 * before proceeding to call hci_dev_do_open. This is
1689 * particularly important if the setup procedure has not yet
1690 * completed.
1691 */
1692 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1693 cancel_delayed_work(&hdev->power_off);
1694
1695 /* After this call it is guaranteed that the setup procedure
1696 * has finished. This means that error conditions like RFKILL
1697 * or no valid public or static random address apply.
1698 */
1699 flush_workqueue(hdev->req_workqueue);
1700
1701 /* For controllers not using the management interface and that
1702 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1703 * so that pairing works for them. Once the management interface
1704 * is in use this bit will be cleared again and userspace has
1705 * to explicitly enable it.
1706 */
1707 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1708 !hci_dev_test_flag(hdev, HCI_MGMT))
1709 hci_dev_set_flag(hdev, HCI_BONDABLE);
1710
1711 err = hci_dev_do_open(hdev);
1712
1713 done:
1714 hci_dev_put(hdev);
1715 return err;
1716 }
1717
1718 /* This function requires the caller holds hdev->lock */
1719 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1720 {
1721 struct hci_conn_params *p;
1722
1723 list_for_each_entry(p, &hdev->le_conn_params, list) {
1724 if (p->conn) {
1725 hci_conn_drop(p->conn);
1726 hci_conn_put(p->conn);
1727 p->conn = NULL;
1728 }
1729 list_del_init(&p->action);
1730 }
1731
1732 BT_DBG("All LE pending actions cleared");
1733 }
1734
1735 int hci_dev_do_close(struct hci_dev *hdev)
1736 {
1737 bool auto_off;
1738
1739 BT_DBG("%s %p", hdev->name, hdev);
1740
1741 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1742 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1743 test_bit(HCI_UP, &hdev->flags)) {
1744 /* Execute vendor specific shutdown routine */
1745 if (hdev->shutdown)
1746 hdev->shutdown(hdev);
1747 }
1748
1749 cancel_delayed_work(&hdev->power_off);
1750
1751 hci_request_cancel_all(hdev);
1752 hci_req_sync_lock(hdev);
1753
1754 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1755 cancel_delayed_work_sync(&hdev->cmd_timer);
1756 hci_req_sync_unlock(hdev);
1757 return 0;
1758 }
1759
1760 hci_leds_update_powered(hdev, false);
1761
1762 /* Flush RX and TX works */
1763 flush_work(&hdev->tx_work);
1764 flush_work(&hdev->rx_work);
1765
1766 if (hdev->discov_timeout > 0) {
1767 hdev->discov_timeout = 0;
1768 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1769 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1770 }
1771
1772 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1773 cancel_delayed_work(&hdev->service_cache);
1774
1775 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1776 struct adv_info *adv_instance;
1777
1778 cancel_delayed_work_sync(&hdev->rpa_expired);
1779
1780 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1781 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1782 }
1783
1784 /* Avoid potential lockdep warnings from the *_flush() calls by
1785 * ensuring the workqueue is empty up front.
1786 */
1787 drain_workqueue(hdev->workqueue);
1788
1789 hci_dev_lock(hdev);
1790
1791 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1792
1793 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1794
1795 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1796 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1797 hci_dev_test_flag(hdev, HCI_MGMT))
1798 __mgmt_power_off(hdev);
1799
1800 hci_inquiry_cache_flush(hdev);
1801 hci_pend_le_actions_clear(hdev);
1802 hci_conn_hash_flush(hdev);
1803 hci_dev_unlock(hdev);
1804
1805 smp_unregister(hdev);
1806
1807 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1808
1809 msft_do_close(hdev);
1810
1811 if (hdev->flush)
1812 hdev->flush(hdev);
1813
1814 /* Reset device */
1815 skb_queue_purge(&hdev->cmd_q);
1816 atomic_set(&hdev->cmd_cnt, 1);
1817 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1818 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1819 set_bit(HCI_INIT, &hdev->flags);
1820 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1821 clear_bit(HCI_INIT, &hdev->flags);
1822 }
1823
1824 /* flush cmd work */
1825 flush_work(&hdev->cmd_work);
1826
1827 /* Drop queues */
1828 skb_queue_purge(&hdev->rx_q);
1829 skb_queue_purge(&hdev->cmd_q);
1830 skb_queue_purge(&hdev->raw_q);
1831
1832 /* Drop last sent command */
1833 if (hdev->sent_cmd) {
1834 cancel_delayed_work_sync(&hdev->cmd_timer);
1835 kfree_skb(hdev->sent_cmd);
1836 hdev->sent_cmd = NULL;
1837 }
1838
1839 clear_bit(HCI_RUNNING, &hdev->flags);
1840 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1841
1842 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1843 wake_up(&hdev->suspend_wait_q);
1844
1845 /* After this point our queues are empty
1846 * and no tasks are scheduled. */
1847 hdev->close(hdev);
1848
1849 /* Clear flags */
1850 hdev->flags &= BIT(HCI_RAW);
1851 hci_dev_clear_volatile_flags(hdev);
1852
1853 /* Controller radio is available but is currently powered down */
1854 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1855
1856 memset(hdev->eir, 0, sizeof(hdev->eir));
1857 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1858 bacpy(&hdev->random_addr, BDADDR_ANY);
1859
1860 hci_req_sync_unlock(hdev);
1861
1862 hci_dev_put(hdev);
1863 return 0;
1864 }
1865
1866 int hci_dev_close(__u16 dev)
1867 {
1868 struct hci_dev *hdev;
1869 int err;
1870
1871 hdev = hci_dev_get(dev);
1872 if (!hdev)
1873 return -ENODEV;
1874
1875 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1876 err = -EBUSY;
1877 goto done;
1878 }
1879
1880 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1881 cancel_delayed_work(&hdev->power_off);
1882
1883 err = hci_dev_do_close(hdev);
1884
1885 done:
1886 hci_dev_put(hdev);
1887 return err;
1888 }
1889
1890 static int hci_dev_do_reset(struct hci_dev *hdev)
1891 {
1892 int ret;
1893
1894 BT_DBG("%s %p", hdev->name, hdev);
1895
1896 hci_req_sync_lock(hdev);
1897
1898 /* Drop queues */
1899 skb_queue_purge(&hdev->rx_q);
1900 skb_queue_purge(&hdev->cmd_q);
1901
1902 /* Avoid potential lockdep warnings from the *_flush() calls by
1903 * ensuring the workqueue is empty up front.
1904 */
1905 drain_workqueue(hdev->workqueue);
1906
1907 hci_dev_lock(hdev);
1908 hci_inquiry_cache_flush(hdev);
1909 hci_conn_hash_flush(hdev);
1910 hci_dev_unlock(hdev);
1911
1912 if (hdev->flush)
1913 hdev->flush(hdev);
1914
1915 atomic_set(&hdev->cmd_cnt, 1);
1916 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1917
1918 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1919
1920 hci_req_sync_unlock(hdev);
1921 return ret;
1922 }
1923
1924 int hci_dev_reset(__u16 dev)
1925 {
1926 struct hci_dev *hdev;
1927 int err, srcu_index;
1928
1929 hdev = hci_dev_get_srcu(dev, &srcu_index);
1930 if (!hdev)
1931 return -ENODEV;
1932
1933 if (!test_bit(HCI_UP, &hdev->flags)) {
1934 err = -ENETDOWN;
1935 goto done;
1936 }
1937
1938 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1939 err = -EBUSY;
1940 goto done;
1941 }
1942
1943 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1944 err = -EOPNOTSUPP;
1945 goto done;
1946 }
1947
1948 err = hci_dev_do_reset(hdev);
1949
1950 done:
1951 hci_dev_put_srcu(hdev, srcu_index);
1952 return err;
1953 }
1954
1955 int hci_dev_reset_stat(__u16 dev)
1956 {
1957 struct hci_dev *hdev;
1958 int ret = 0;
1959
1960 hdev = hci_dev_get(dev);
1961 if (!hdev)
1962 return -ENODEV;
1963
1964 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1965 ret = -EBUSY;
1966 goto done;
1967 }
1968
1969 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1970 ret = -EOPNOTSUPP;
1971 goto done;
1972 }
1973
1974 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1975
1976 done:
1977 hci_dev_put(hdev);
1978 return ret;
1979 }
1980
1981 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1982 {
1983 bool conn_changed, discov_changed;
1984
1985 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1986
1987 if ((scan & SCAN_PAGE))
1988 conn_changed = !hci_dev_test_and_set_flag(hdev,
1989 HCI_CONNECTABLE);
1990 else
1991 conn_changed = hci_dev_test_and_clear_flag(hdev,
1992 HCI_CONNECTABLE);
1993
1994 if ((scan & SCAN_INQUIRY)) {
1995 discov_changed = !hci_dev_test_and_set_flag(hdev,
1996 HCI_DISCOVERABLE);
1997 } else {
1998 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1999 discov_changed = hci_dev_test_and_clear_flag(hdev,
2000 HCI_DISCOVERABLE);
2001 }
2002
2003 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2004 return;
2005
2006 if (conn_changed || discov_changed) {
2007 /* In case this was disabled through mgmt */
2008 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2009
2010 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2011 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2012
2013 mgmt_new_settings(hdev);
2014 }
2015 }
2016
2017 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2018 {
2019 struct hci_dev *hdev;
2020 struct hci_dev_req dr;
2021 int err = 0;
2022
2023 if (copy_from_user(&dr, arg, sizeof(dr)))
2024 return -EFAULT;
2025
2026 hdev = hci_dev_get(dr.dev_id);
2027 if (!hdev)
2028 return -ENODEV;
2029
2030 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2031 err = -EBUSY;
2032 goto done;
2033 }
2034
2035 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2036 err = -EOPNOTSUPP;
2037 goto done;
2038 }
2039
2040 if (hdev->dev_type != HCI_PRIMARY) {
2041 err = -EOPNOTSUPP;
2042 goto done;
2043 }
2044
2045 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2046 err = -EOPNOTSUPP;
2047 goto done;
2048 }
2049
2050 switch (cmd) {
2051 case HCISETAUTH:
2052 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2053 HCI_INIT_TIMEOUT, NULL);
2054 break;
2055
2056 case HCISETENCRYPT:
2057 if (!lmp_encrypt_capable(hdev)) {
2058 err = -EOPNOTSUPP;
2059 break;
2060 }
2061
2062 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2063 /* Auth must be enabled first */
2064 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2065 HCI_INIT_TIMEOUT, NULL);
2066 if (err)
2067 break;
2068 }
2069
2070 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2071 HCI_INIT_TIMEOUT, NULL);
2072 break;
2073
2074 case HCISETSCAN:
2075 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2076 HCI_INIT_TIMEOUT, NULL);
2077
2078 /* Ensure that the connectable and discoverable states
2079 * get correctly modified as this was a non-mgmt change.
2080 */
2081 if (!err)
2082 hci_update_scan_state(hdev, dr.dev_opt);
2083 break;
2084
2085 case HCISETLINKPOL:
2086 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2087 HCI_INIT_TIMEOUT, NULL);
2088 break;
2089
2090 case HCISETLINKMODE:
2091 hdev->link_mode = ((__u16) dr.dev_opt) &
2092 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2093 break;
2094
2095 case HCISETPTYPE:
2096 if (hdev->pkt_type == (__u16) dr.dev_opt)
2097 break;
2098
2099 hdev->pkt_type = (__u16) dr.dev_opt;
2100 mgmt_phy_configuration_changed(hdev, NULL);
2101 break;
2102
2103 case HCISETACLMTU:
2104 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2105 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2106 break;
2107
2108 case HCISETSCOMTU:
2109 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2110 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2111 break;
2112
2113 default:
2114 err = -EINVAL;
2115 break;
2116 }
2117
2118 done:
2119 hci_dev_put(hdev);
2120 return err;
2121 }
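
/* Illustrative userspace sketch (not part of the original file): packing
 * dev_opt for HCISETACLMTU exactly as the handler above unpacks it.  The
 * first __u16 of dev_opt carries the packet count and the second the MTU.
 * The socket descriptor and the numeric values below are hypothetical.
 */
#if 0
	struct hci_dev_req dr = { .dev_id = 0 };
	__u16 *opt = (__u16 *)&dr.dev_opt;

	opt[0] = 8;		/* outstanding ACL packets */
	opt[1] = 1021;		/* ACL MTU in bytes */

	if (ioctl(hci_sock, HCISETACLMTU, &dr) < 0)
		perror("HCISETACLMTU");
#endif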
2122
2123 int hci_get_dev_list(void __user *arg)
2124 {
2125 struct hci_dev *hdev;
2126 struct hci_dev_list_req *dl;
2127 struct hci_dev_req *dr;
2128 int n = 0, size, err;
2129 __u16 dev_num;
2130
2131 if (get_user(dev_num, (__u16 __user *) arg))
2132 return -EFAULT;
2133
2134 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2135 return -EINVAL;
2136
2137 size = sizeof(*dl) + dev_num * sizeof(*dr);
2138
2139 dl = kzalloc(size, GFP_KERNEL);
2140 if (!dl)
2141 return -ENOMEM;
2142
2143 dr = dl->dev_req;
2144
2145 read_lock(&hci_dev_list_lock);
2146 list_for_each_entry(hdev, &hci_dev_list, list) {
2147 unsigned long flags = hdev->flags;
2148
2149 /* When auto-off is configured it means the transport
2150 * is still running, but in that case the device should
2151 * nevertheless be reported as down.
2152 */
2153 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2154 flags &= ~BIT(HCI_UP);
2155
2156 (dr + n)->dev_id = hdev->id;
2157 (dr + n)->dev_opt = flags;
2158
2159 if (++n >= dev_num)
2160 break;
2161 }
2162 read_unlock(&hci_dev_list_lock);
2163
2164 dl->dev_num = n;
2165 size = sizeof(*dl) + n * sizeof(*dr);
2166
2167 err = copy_to_user(arg, dl, size);
2168 kfree(dl);
2169
2170 return err ? -EFAULT : 0;
2171 }
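
/* Illustrative userspace sketch (not part of the original file): the
 * matching HCIGETDEVLIST call for the handler above.  dev_num is filled
 * in before the ioctl and overwritten with the number of entries actually
 * returned.  The socket descriptor and the limit of 16 are hypothetical.
 */
#if 0
	struct hci_dev_list_req *dl;
	int i, max = 16;

	dl = calloc(1, sizeof(*dl) + max * sizeof(struct hci_dev_req));
	if (!dl)
		return -ENOMEM;

	dl->dev_num = max;
	if (ioctl(hci_sock, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n",
			       (unsigned int)dl->dev_req[i].dev_id,
			       (unsigned int)dl->dev_req[i].dev_opt);
	free(dl);
#endif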
2172
2173 int hci_get_dev_info(void __user *arg)
2174 {
2175 struct hci_dev *hdev;
2176 struct hci_dev_info di;
2177 unsigned long flags;
2178 int err = 0;
2179
2180 if (copy_from_user(&di, arg, sizeof(di)))
2181 return -EFAULT;
2182
2183 hdev = hci_dev_get(di.dev_id);
2184 if (!hdev)
2185 return -ENODEV;
2186
2187 /* When auto-off is configured it means the transport
2188 * is still running, but in that case the device should
2189 * nevertheless be reported as down.
2190 */
2191 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2192 flags = hdev->flags & ~BIT(HCI_UP);
2193 else
2194 flags = hdev->flags;
2195
2196 strscpy(di.name, hdev->name, sizeof(di.name));
2197 di.bdaddr = hdev->bdaddr;
2198 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2199 di.flags = flags;
2200 di.pkt_type = hdev->pkt_type;
2201 if (lmp_bredr_capable(hdev)) {
2202 di.acl_mtu = hdev->acl_mtu;
2203 di.acl_pkts = hdev->acl_pkts;
2204 di.sco_mtu = hdev->sco_mtu;
2205 di.sco_pkts = hdev->sco_pkts;
2206 } else {
2207 di.acl_mtu = hdev->le_mtu;
2208 di.acl_pkts = hdev->le_pkts;
2209 di.sco_mtu = 0;
2210 di.sco_pkts = 0;
2211 }
2212 di.link_policy = hdev->link_policy;
2213 di.link_mode = hdev->link_mode;
2214
2215 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2216 memcpy(&di.features, &hdev->features, sizeof(di.features));
2217
2218 if (copy_to_user(arg, &di, sizeof(di)))
2219 err = -EFAULT;
2220
2221 hci_dev_put(hdev);
2222
2223 return err;
2224 }
2225
2226 /* ---- Interface to HCI drivers ---- */
2227
2228 static int hci_rfkill_set_block(void *data, bool blocked)
2229 {
2230 struct hci_dev *hdev = data;
2231
2232 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2233
2234 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2235 return -EBUSY;
2236
2237 if (blocked) {
2238 hci_dev_set_flag(hdev, HCI_RFKILLED);
2239 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2240 !hci_dev_test_flag(hdev, HCI_CONFIG))
2241 hci_dev_do_close(hdev);
2242 } else {
2243 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2244 }
2245
2246 return 0;
2247 }
2248
2249 static const struct rfkill_ops hci_rfkill_ops = {
2250 .set_block = hci_rfkill_set_block,
2251 };
2252
2253 static void hci_power_on(struct work_struct *work)
2254 {
2255 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2256 int err;
2257
2258 BT_DBG("%s", hdev->name);
2259
2260 if (test_bit(HCI_UP, &hdev->flags) &&
2261 hci_dev_test_flag(hdev, HCI_MGMT) &&
2262 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2263 cancel_delayed_work(&hdev->power_off);
2264 hci_req_sync_lock(hdev);
2265 err = __hci_req_hci_power_on(hdev);
2266 hci_req_sync_unlock(hdev);
2267 mgmt_power_on(hdev, err);
2268 return;
2269 }
2270
2271 err = hci_dev_do_open(hdev);
2272 if (err < 0) {
2273 hci_dev_lock(hdev);
2274 mgmt_set_powered_failed(hdev, err);
2275 hci_dev_unlock(hdev);
2276 return;
2277 }
2278
2279 /* During the HCI setup phase, a few error conditions are
2280 * ignored and they need to be checked now. If they are still
2281 * valid, it is important to turn the device back off.
2282 */
2283 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2284 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2285 (hdev->dev_type == HCI_PRIMARY &&
2286 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2287 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2288 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2289 hci_dev_do_close(hdev);
2290 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2291 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2292 HCI_AUTO_OFF_TIMEOUT);
2293 }
2294
2295 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2296 /* For unconfigured devices, set the HCI_RAW flag
2297 * so that userspace can easily identify them.
2298 */
2299 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2300 set_bit(HCI_RAW, &hdev->flags);
2301
2302 /* For fully configured devices, this will send
2303 * the Index Added event. For unconfigured devices,
2304 * it will send an Unconfigured Index Added event.
2305 *
2306 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2307 * and no event will be sent.
2308 */
2309 mgmt_index_added(hdev);
2310 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2311 /* When the controller is now configured, then it
2312 * is important to clear the HCI_RAW flag.
2313 */
2314 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2315 clear_bit(HCI_RAW, &hdev->flags);
2316
2317 /* Powering on the controller with HCI_CONFIG set only
2318 * happens with the transition from unconfigured to
2319 * configured. This will send the Index Added event.
2320 */
2321 mgmt_index_added(hdev);
2322 }
2323 }
2324
2325 static void hci_power_off(struct work_struct *work)
2326 {
2327 struct hci_dev *hdev = container_of(work, struct hci_dev,
2328 power_off.work);
2329
2330 BT_DBG("%s", hdev->name);
2331
2332 hci_dev_do_close(hdev);
2333 }
2334
2335 static void hci_error_reset(struct work_struct *work)
2336 {
2337 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2338
2339 hci_dev_hold(hdev);
2340 BT_DBG("%s", hdev->name);
2341
2342 if (hdev->hw_error)
2343 hdev->hw_error(hdev, hdev->hw_error_code);
2344 else
2345 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2346
2347 if (!hci_dev_do_close(hdev))
2348 hci_dev_do_open(hdev);
2349
2350 hci_dev_put(hdev);
2351 }
2352
2353 void hci_uuids_clear(struct hci_dev *hdev)
2354 {
2355 struct bt_uuid *uuid, *tmp;
2356
2357 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2358 list_del(&uuid->list);
2359 kfree(uuid);
2360 }
2361 }
2362
2363 void hci_link_keys_clear(struct hci_dev *hdev)
2364 {
2365 struct link_key *key, *tmp;
2366
2367 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2368 list_del_rcu(&key->list);
2369 kfree_rcu(key, rcu);
2370 }
2371 }
2372
2373 void hci_smp_ltks_clear(struct hci_dev *hdev)
2374 {
2375 struct smp_ltk *k, *tmp;
2376
2377 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2378 list_del_rcu(&k->list);
2379 kfree_rcu(k, rcu);
2380 }
2381 }
2382
2383 void hci_smp_irks_clear(struct hci_dev *hdev)
2384 {
2385 struct smp_irk *k, *tmp;
2386
2387 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2388 list_del_rcu(&k->list);
2389 kfree_rcu(k, rcu);
2390 }
2391 }
2392
2393 void hci_blocked_keys_clear(struct hci_dev *hdev)
2394 {
2395 struct blocked_key *b, *tmp;
2396
2397 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
2398 list_del_rcu(&b->list);
2399 kfree_rcu(b, rcu);
2400 }
2401 }
2402
2403 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2404 {
2405 bool blocked = false;
2406 struct blocked_key *b;
2407
2408 rcu_read_lock();
2409 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2410 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2411 blocked = true;
2412 break;
2413 }
2414 }
2415
2416 rcu_read_unlock();
2417 return blocked;
2418 }
2419
2420 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2421 {
2422 struct link_key *k;
2423
2424 rcu_read_lock();
2425 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2426 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2427 rcu_read_unlock();
2428
2429 if (hci_is_blocked_key(hdev,
2430 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2431 k->val)) {
2432 bt_dev_warn_ratelimited(hdev,
2433 "Link key blocked for %pMR",
2434 &k->bdaddr);
2435 return NULL;
2436 }
2437
2438 return k;
2439 }
2440 }
2441 rcu_read_unlock();
2442
2443 return NULL;
2444 }
2445
2446 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2447 u8 key_type, u8 old_key_type)
2448 {
2449 /* Legacy key */
2450 if (key_type < 0x03)
2451 return true;
2452
2453 /* Debug keys are insecure so don't store them persistently */
2454 if (key_type == HCI_LK_DEBUG_COMBINATION)
2455 return false;
2456
2457 /* Changed combination key and there's no previous one */
2458 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2459 return false;
2460
2461 /* Security mode 3 case */
2462 if (!conn)
2463 return true;
2464
2465 /* BR/EDR key derived using SC from an LE link */
2466 if (conn->type == LE_LINK)
2467 return true;
2468
2469 /* Neither local nor remote side had no-bonding as requirement */
2470 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2471 return true;
2472
2473 /* Local side had dedicated bonding as requirement */
2474 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2475 return true;
2476
2477 /* Remote side had dedicated bonding as requirement */
2478 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2479 return true;
2480
2481 /* If none of the above criteria match, then don't store the key
2482 * persistently */
2483 return false;
2484 }
2485
2486 static u8 ltk_role(u8 type)
2487 {
2488 if (type == SMP_LTK)
2489 return HCI_ROLE_MASTER;
2490
2491 return HCI_ROLE_SLAVE;
2492 }
2493
2494 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2495 u8 addr_type, u8 role)
2496 {
2497 struct smp_ltk *k;
2498
2499 rcu_read_lock();
2500 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2501 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2502 continue;
2503
2504 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2505 rcu_read_unlock();
2506
2507 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2508 k->val)) {
2509 bt_dev_warn_ratelimited(hdev,
2510 "LTK blocked for %pMR",
2511 &k->bdaddr);
2512 return NULL;
2513 }
2514
2515 return k;
2516 }
2517 }
2518 rcu_read_unlock();
2519
2520 return NULL;
2521 }
2522
2523 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2524 {
2525 struct smp_irk *irk_to_return = NULL;
2526 struct smp_irk *irk;
2527
2528 rcu_read_lock();
2529 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2530 if (!bacmp(&irk->rpa, rpa)) {
2531 irk_to_return = irk;
2532 goto done;
2533 }
2534 }
2535
2536 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2537 if (smp_irk_matches(hdev, irk->val, rpa)) {
2538 bacpy(&irk->rpa, rpa);
2539 irk_to_return = irk;
2540 goto done;
2541 }
2542 }
2543
2544 done:
2545 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2546 irk_to_return->val)) {
2547 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2548 &irk_to_return->bdaddr);
2549 irk_to_return = NULL;
2550 }
2551
2552 rcu_read_unlock();
2553
2554 return irk_to_return;
2555 }
2556
2557 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2558 u8 addr_type)
2559 {
2560 struct smp_irk *irk_to_return = NULL;
2561 struct smp_irk *irk;
2562
2563 /* Identity Address must be public or static random */
2564 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2565 return NULL;
2566
2567 rcu_read_lock();
2568 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2569 if (addr_type == irk->addr_type &&
2570 bacmp(bdaddr, &irk->bdaddr) == 0) {
2571 irk_to_return = irk;
2572 goto done;
2573 }
2574 }
2575
2576 done:
2577
2578 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2579 irk_to_return->val)) {
2580 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2581 &irk_to_return->bdaddr);
2582 irk_to_return = NULL;
2583 }
2584
2585 rcu_read_unlock();
2586
2587 return irk_to_return;
2588 }
2589
2590 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2591 bdaddr_t *bdaddr, u8 *val, u8 type,
2592 u8 pin_len, bool *persistent)
2593 {
2594 struct link_key *key, *old_key;
2595 u8 old_key_type;
2596
2597 old_key = hci_find_link_key(hdev, bdaddr);
2598 if (old_key) {
2599 old_key_type = old_key->type;
2600 key = old_key;
2601 } else {
2602 old_key_type = conn ? conn->key_type : 0xff;
2603 key = kzalloc(sizeof(*key), GFP_KERNEL);
2604 if (!key)
2605 return NULL;
2606 list_add_rcu(&key->list, &hdev->link_keys);
2607 }
2608
2609 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2610
2611 /* Some buggy controller combinations generate a changed
2612 * combination key for legacy pairing even when there's no
2613 * previous key */
2614 if (type == HCI_LK_CHANGED_COMBINATION &&
2615 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2616 type = HCI_LK_COMBINATION;
2617 if (conn)
2618 conn->key_type = type;
2619 }
2620
2621 bacpy(&key->bdaddr, bdaddr);
2622 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2623 key->pin_len = pin_len;
2624
2625 if (type == HCI_LK_CHANGED_COMBINATION)
2626 key->type = old_key_type;
2627 else
2628 key->type = type;
2629
2630 if (persistent)
2631 *persistent = hci_persistent_key(hdev, conn, type,
2632 old_key_type);
2633
2634 return key;
2635 }
2636
2637 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2638 u8 addr_type, u8 type, u8 authenticated,
2639 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2640 {
2641 struct smp_ltk *key, *old_key;
2642 u8 role = ltk_role(type);
2643
2644 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2645 if (old_key)
2646 key = old_key;
2647 else {
2648 key = kzalloc(sizeof(*key), GFP_KERNEL);
2649 if (!key)
2650 return NULL;
2651 list_add_rcu(&key->list, &hdev->long_term_keys);
2652 }
2653
2654 bacpy(&key->bdaddr, bdaddr);
2655 key->bdaddr_type = addr_type;
2656 memcpy(key->val, tk, sizeof(key->val));
2657 key->authenticated = authenticated;
2658 key->ediv = ediv;
2659 key->rand = rand;
2660 key->enc_size = enc_size;
2661 key->type = type;
2662
2663 return key;
2664 }
2665
2666 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2667 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2668 {
2669 struct smp_irk *irk;
2670
2671 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2672 if (!irk) {
2673 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2674 if (!irk)
2675 return NULL;
2676
2677 bacpy(&irk->bdaddr, bdaddr);
2678 irk->addr_type = addr_type;
2679
2680 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2681 }
2682
2683 memcpy(irk->val, val, 16);
2684 bacpy(&irk->rpa, rpa);
2685
2686 return irk;
2687 }
2688
2689 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2690 {
2691 struct link_key *key;
2692
2693 key = hci_find_link_key(hdev, bdaddr);
2694 if (!key)
2695 return -ENOENT;
2696
2697 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2698
2699 list_del_rcu(&key->list);
2700 kfree_rcu(key, rcu);
2701
2702 return 0;
2703 }
2704
2705 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2706 {
2707 struct smp_ltk *k, *tmp;
2708 int removed = 0;
2709
2710 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2711 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2712 continue;
2713
2714 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2715
2716 list_del_rcu(&k->list);
2717 kfree_rcu(k, rcu);
2718 removed++;
2719 }
2720
2721 return removed ? 0 : -ENOENT;
2722 }
2723
2724 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2725 {
2726 struct smp_irk *k, *tmp;
2727
2728 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2729 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2730 continue;
2731
2732 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2733
2734 list_del_rcu(&k->list);
2735 kfree_rcu(k, rcu);
2736 }
2737 }
2738
2739 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2740 {
2741 struct smp_ltk *k;
2742 struct smp_irk *irk;
2743 u8 addr_type;
2744
2745 if (type == BDADDR_BREDR) {
2746 if (hci_find_link_key(hdev, bdaddr))
2747 return true;
2748 return false;
2749 }
2750
2751 /* Convert to HCI addr type which struct smp_ltk uses */
2752 if (type == BDADDR_LE_PUBLIC)
2753 addr_type = ADDR_LE_DEV_PUBLIC;
2754 else
2755 addr_type = ADDR_LE_DEV_RANDOM;
2756
2757 irk = hci_get_irk(hdev, bdaddr, addr_type);
2758 if (irk) {
2759 bdaddr = &irk->bdaddr;
2760 addr_type = irk->addr_type;
2761 }
2762
2763 rcu_read_lock();
2764 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2765 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2766 rcu_read_unlock();
2767 return true;
2768 }
2769 }
2770 rcu_read_unlock();
2771
2772 return false;
2773 }
2774
2775 /* HCI command timer function */
2776 static void hci_cmd_timeout(struct work_struct *work)
2777 {
2778 struct hci_dev *hdev = container_of(work, struct hci_dev,
2779 cmd_timer.work);
2780
2781 if (hdev->sent_cmd) {
2782 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2783 u16 opcode = __le16_to_cpu(sent->opcode);
2784
2785 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2786 } else {
2787 bt_dev_err(hdev, "command tx timeout");
2788 }
2789
2790 if (hdev->cmd_timeout)
2791 hdev->cmd_timeout(hdev);
2792
2793 atomic_set(&hdev->cmd_cnt, 1);
2794 queue_work(hdev->workqueue, &hdev->cmd_work);
2795 }
2796
2797 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2798 bdaddr_t *bdaddr, u8 bdaddr_type)
2799 {
2800 struct oob_data *data;
2801
2802 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2803 if (bacmp(bdaddr, &data->bdaddr) != 0)
2804 continue;
2805 if (data->bdaddr_type != bdaddr_type)
2806 continue;
2807 return data;
2808 }
2809
2810 return NULL;
2811 }
2812
2813 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2814 u8 bdaddr_type)
2815 {
2816 struct oob_data *data;
2817
2818 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2819 if (!data)
2820 return -ENOENT;
2821
2822 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2823
2824 list_del(&data->list);
2825 kfree(data);
2826
2827 return 0;
2828 }
2829
2830 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2831 {
2832 struct oob_data *data, *n;
2833
2834 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2835 list_del(&data->list);
2836 kfree(data);
2837 }
2838 }
2839
2840 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2841 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2842 u8 *hash256, u8 *rand256)
2843 {
2844 struct oob_data *data;
2845
2846 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2847 if (!data) {
2848 data = kmalloc(sizeof(*data), GFP_KERNEL);
2849 if (!data)
2850 return -ENOMEM;
2851
2852 bacpy(&data->bdaddr, bdaddr);
2853 data->bdaddr_type = bdaddr_type;
2854 list_add(&data->list, &hdev->remote_oob_data);
2855 }
2856
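/* data->present is a bitmask of which OOB values are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 */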
2857 if (hash192 && rand192) {
2858 memcpy(data->hash192, hash192, sizeof(data->hash192));
2859 memcpy(data->rand192, rand192, sizeof(data->rand192));
2860 if (hash256 && rand256)
2861 data->present = 0x03;
2862 } else {
2863 memset(data->hash192, 0, sizeof(data->hash192));
2864 memset(data->rand192, 0, sizeof(data->rand192));
2865 if (hash256 && rand256)
2866 data->present = 0x02;
2867 else
2868 data->present = 0x00;
2869 }
2870
2871 if (hash256 && rand256) {
2872 memcpy(data->hash256, hash256, sizeof(data->hash256));
2873 memcpy(data->rand256, rand256, sizeof(data->rand256));
2874 } else {
2875 memset(data->hash256, 0, sizeof(data->hash256));
2876 memset(data->rand256, 0, sizeof(data->rand256));
2877 if (hash192 && rand192)
2878 data->present = 0x01;
2879 }
2880
2881 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2882
2883 return 0;
2884 }
2885
2886 /* This function requires the caller holds hdev->lock */
2887 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2888 {
2889 struct adv_info *adv_instance;
2890
2891 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2892 if (adv_instance->instance == instance)
2893 return adv_instance;
2894 }
2895
2896 return NULL;
2897 }
2898
2899 /* This function requires the caller holds hdev->lock */
2900 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2901 {
2902 struct adv_info *cur_instance;
2903
2904 cur_instance = hci_find_adv_instance(hdev, instance);
2905 if (!cur_instance)
2906 return NULL;
2907
2908 if (cur_instance == list_last_entry(&hdev->adv_instances,
2909 struct adv_info, list))
2910 return list_first_entry(&hdev->adv_instances,
2911 struct adv_info, list);
2912 else
2913 return list_next_entry(cur_instance, list);
2914 }
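
/* Illustrative sketch (not part of the original file): cycling through
 * advertising instances round-robin with the helper above.  The caller
 * must hold hdev->lock; the starting instance number is hypothetical.
 */
#if 0
	struct adv_info *adv = hci_get_next_instance(hdev, 1);

	if (adv)
		BT_DBG("next advertising instance %u", adv->instance);
#endif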
2915
2916 /* This function requires the caller holds hdev->lock */
2917 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2918 {
2919 struct adv_info *adv_instance;
2920
2921 adv_instance = hci_find_adv_instance(hdev, instance);
2922 if (!adv_instance)
2923 return -ENOENT;
2924
2925 BT_DBG("%s removing %dMR", hdev->name, instance);
2926
2927 if (hdev->cur_adv_instance == instance) {
2928 if (hdev->adv_instance_timeout) {
2929 cancel_delayed_work(&hdev->adv_instance_expire);
2930 hdev->adv_instance_timeout = 0;
2931 }
2932 hdev->cur_adv_instance = 0x00;
2933 }
2934
2935 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2936
2937 list_del(&adv_instance->list);
2938 kfree(adv_instance);
2939
2940 hdev->adv_instance_cnt--;
2941
2942 return 0;
2943 }
2944
2945 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2946 {
2947 struct adv_info *adv_instance, *n;
2948
2949 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2950 adv_instance->rpa_expired = rpa_expired;
2951 }
2952
2953 /* This function requires the caller holds hdev->lock */
2954 void hci_adv_instances_clear(struct hci_dev *hdev)
2955 {
2956 struct adv_info *adv_instance, *n;
2957
2958 if (hdev->adv_instance_timeout) {
2959 cancel_delayed_work(&hdev->adv_instance_expire);
2960 hdev->adv_instance_timeout = 0;
2961 }
2962
2963 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2964 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2965 list_del(&adv_instance->list);
2966 kfree(adv_instance);
2967 }
2968
2969 hdev->adv_instance_cnt = 0;
2970 hdev->cur_adv_instance = 0x00;
2971 }
2972
2973 static void adv_instance_rpa_expired(struct work_struct *work)
2974 {
2975 struct adv_info *adv_instance = container_of(work, struct adv_info,
2976 rpa_expired_cb.work);
2977
2978 BT_DBG("");
2979
2980 adv_instance->rpa_expired = true;
2981 }
2982
2983 /* This function requires the caller holds hdev->lock */
2984 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2985 u16 adv_data_len, u8 *adv_data,
2986 u16 scan_rsp_len, u8 *scan_rsp_data,
2987 u16 timeout, u16 duration)
2988 {
2989 struct adv_info *adv_instance;
2990
2991 adv_instance = hci_find_adv_instance(hdev, instance);
2992 if (adv_instance) {
2993 memset(adv_instance->adv_data, 0,
2994 sizeof(adv_instance->adv_data));
2995 memset(adv_instance->scan_rsp_data, 0,
2996 sizeof(adv_instance->scan_rsp_data));
2997 } else {
2998 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2999 instance < 1 || instance > hdev->le_num_of_adv_sets)
3000 return -EOVERFLOW;
3001
3002 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3003 if (!adv_instance)
3004 return -ENOMEM;
3005
3006 adv_instance->pending = true;
3007 adv_instance->instance = instance;
3008 list_add(&adv_instance->list, &hdev->adv_instances);
3009 hdev->adv_instance_cnt++;
3010 }
3011
3012 adv_instance->flags = flags;
3013 adv_instance->adv_data_len = adv_data_len;
3014 adv_instance->scan_rsp_len = scan_rsp_len;
3015
3016 if (adv_data_len)
3017 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3018
3019 if (scan_rsp_len)
3020 memcpy(adv_instance->scan_rsp_data,
3021 scan_rsp_data, scan_rsp_len);
3022
3023 adv_instance->timeout = timeout;
3024 adv_instance->remaining_time = timeout;
3025
3026 if (duration == 0)
3027 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3028 else
3029 adv_instance->duration = duration;
3030
3031 adv_instance->tx_power = HCI_TX_POWER_INVALID;
3032
3033 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3034 adv_instance_rpa_expired);
3035
3036 BT_DBG("%s for %dMR", hdev->name, instance);
3037
3038 return 0;
3039 }
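
/* Illustrative sketch (not part of the original file): registering a
 * minimal advertising instance with the helper above while holding
 * hdev->lock.  The adv_data is a single Flags AD structure (length 0x02,
 * type 0x01, LE General Discoverable | BR/EDR Not Supported); the
 * instance number and the zero timeout/duration are hypothetical.
 */
#if 0
	u8 adv_data[] = { 0x02, 0x01, 0x06 };

	hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
			     0, NULL, 0, 0);
#endif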
3040
3041 /* This function requires the caller holds hdev->lock */
3042 void hci_adv_monitors_clear(struct hci_dev *hdev)
3043 {
3044 struct adv_monitor *monitor;
3045 int handle;
3046
3047 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3048 hci_free_adv_monitor(monitor);
3049
3050 idr_destroy(&hdev->adv_monitors_idr);
3051 }
3052
3053 void hci_free_adv_monitor(struct adv_monitor *monitor)
3054 {
3055 struct adv_pattern *pattern;
3056 struct adv_pattern *tmp;
3057
3058 if (!monitor)
3059 return;
3060
3061 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3062 kfree(pattern);
3063
3064 kfree(monitor);
3065 }
3066
3067 /* This function requires the caller holds hdev->lock */
3068 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3069 {
3070 int min, max, handle;
3071
3072 if (!monitor)
3073 return -EINVAL;
3074
3075 min = HCI_MIN_ADV_MONITOR_HANDLE;
3076 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
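/* idr_alloc() treats the upper bound as exclusive, so monitor handles
 * are allocated from the range [min, max).
 */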
3077 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3078 GFP_KERNEL);
3079 if (handle < 0)
3080 return handle;
3081
3082 hdev->adv_monitors_cnt++;
3083 monitor->handle = handle;
3084
3085 hci_update_background_scan(hdev);
3086
3087 return 0;
3088 }
3089
3090 static int free_adv_monitor(int id, void *ptr, void *data)
3091 {
3092 struct hci_dev *hdev = data;
3093 struct adv_monitor *monitor = ptr;
3094
3095 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3096 hci_free_adv_monitor(monitor);
3097 hdev->adv_monitors_cnt--;
3098
3099 return 0;
3100 }
3101
3102 /* This function requires the caller holds hdev->lock */
3103 int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3104 {
3105 struct adv_monitor *monitor;
3106
3107 if (handle) {
3108 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3109 if (!monitor)
3110 return -ENOENT;
3111
3112 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3113 hci_free_adv_monitor(monitor);
3114 hdev->adv_monitors_cnt--;
3115 } else {
3116 /* Remove all monitors if handle is 0. */
3117 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3118 }
3119
3120 hci_update_background_scan(hdev);
3121
3122 return 0;
3123 }
3124
3125 /* This function requires the caller holds hdev->lock */
3126 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3127 {
3128 return !idr_is_empty(&hdev->adv_monitors_idr);
3129 }
3130
3131 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3132 bdaddr_t *bdaddr, u8 type)
3133 {
3134 struct bdaddr_list *b;
3135
3136 list_for_each_entry(b, bdaddr_list, list) {
3137 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3138 return b;
3139 }
3140
3141 return NULL;
3142 }
3143
3144 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3145 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3146 u8 type)
3147 {
3148 struct bdaddr_list_with_irk *b;
3149
3150 list_for_each_entry(b, bdaddr_list, list) {
3151 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3152 return b;
3153 }
3154
3155 return NULL;
3156 }
3157
3158 struct bdaddr_list_with_flags *
3159 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3160 bdaddr_t *bdaddr, u8 type)
3161 {
3162 struct bdaddr_list_with_flags *b;
3163
3164 list_for_each_entry(b, bdaddr_list, list) {
3165 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3166 return b;
3167 }
3168
3169 return NULL;
3170 }
3171
3172 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3173 {
3174 struct bdaddr_list *b, *n;
3175
3176 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3177 list_del(&b->list);
3178 kfree(b);
3179 }
3180 }
3181
3182 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3183 {
3184 struct bdaddr_list *entry;
3185
3186 if (!bacmp(bdaddr, BDADDR_ANY))
3187 return -EBADF;
3188
3189 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3190 return -EEXIST;
3191
3192 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3193 if (!entry)
3194 return -ENOMEM;
3195
3196 bacpy(&entry->bdaddr, bdaddr);
3197 entry->bdaddr_type = type;
3198
3199 list_add(&entry->list, list);
3200
3201 return 0;
3202 }
3203
3204 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3205 u8 type, u8 *peer_irk, u8 *local_irk)
3206 {
3207 struct bdaddr_list_with_irk *entry;
3208
3209 if (!bacmp(bdaddr, BDADDR_ANY))
3210 return -EBADF;
3211
3212 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3213 return -EEXIST;
3214
3215 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3216 if (!entry)
3217 return -ENOMEM;
3218
3219 bacpy(&entry->bdaddr, bdaddr);
3220 entry->bdaddr_type = type;
3221
3222 if (peer_irk)
3223 memcpy(entry->peer_irk, peer_irk, 16);
3224
3225 if (local_irk)
3226 memcpy(entry->local_irk, local_irk, 16);
3227
3228 list_add(&entry->list, list);
3229
3230 return 0;
3231 }
3232
3233 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3234 u8 type, u32 flags)
3235 {
3236 struct bdaddr_list_with_flags *entry;
3237
3238 if (!bacmp(bdaddr, BDADDR_ANY))
3239 return -EBADF;
3240
3241 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3242 return -EEXIST;
3243
3244 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3245 if (!entry)
3246 return -ENOMEM;
3247
3248 bacpy(&entry->bdaddr, bdaddr);
3249 entry->bdaddr_type = type;
3250 entry->current_flags = flags;
3251
3252 list_add(&entry->list, list);
3253
3254 return 0;
3255 }
3256
3257 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3258 {
3259 struct bdaddr_list *entry;
3260
3261 if (!bacmp(bdaddr, BDADDR_ANY)) {
3262 hci_bdaddr_list_clear(list);
3263 return 0;
3264 }
3265
3266 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3267 if (!entry)
3268 return -ENOENT;
3269
3270 list_del(&entry->list);
3271 kfree(entry);
3272
3273 return 0;
3274 }
3275
3276 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3277 u8 type)
3278 {
3279 struct bdaddr_list_with_irk *entry;
3280
3281 if (!bacmp(bdaddr, BDADDR_ANY)) {
3282 hci_bdaddr_list_clear(list);
3283 return 0;
3284 }
3285
3286 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3287 if (!entry)
3288 return -ENOENT;
3289
3290 list_del(&entry->list);
3291 kfree(entry);
3292
3293 return 0;
3294 }
3295
3296 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3297 u8 type)
3298 {
3299 struct bdaddr_list_with_flags *entry;
3300
3301 if (!bacmp(bdaddr, BDADDR_ANY)) {
3302 hci_bdaddr_list_clear(list);
3303 return 0;
3304 }
3305
3306 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3307 if (!entry)
3308 return -ENOENT;
3309
3310 list_del(&entry->list);
3311 kfree(entry);
3312
3313 return 0;
3314 }
3315
3316 /* This function requires the caller holds hdev->lock */
3317 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3318 bdaddr_t *addr, u8 addr_type)
3319 {
3320 struct hci_conn_params *params;
3321
3322 list_for_each_entry(params, &hdev->le_conn_params, list) {
3323 if (bacmp(&params->addr, addr) == 0 &&
3324 params->addr_type == addr_type) {
3325 return params;
3326 }
3327 }
3328
3329 return NULL;
3330 }
3331
3332 /* This function requires the caller holds hdev->lock */
3333 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3334 bdaddr_t *addr, u8 addr_type)
3335 {
3336 struct hci_conn_params *param;
3337
3338 switch (addr_type) {
3339 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3340 addr_type = ADDR_LE_DEV_PUBLIC;
3341 break;
3342 case ADDR_LE_DEV_RANDOM_RESOLVED:
3343 addr_type = ADDR_LE_DEV_RANDOM;
3344 break;
3345 }
3346
3347 list_for_each_entry(param, list, action) {
3348 if (bacmp(&param->addr, addr) == 0 &&
3349 param->addr_type == addr_type)
3350 return param;
3351 }
3352
3353 return NULL;
3354 }
3355
3356 /* This function requires the caller holds hdev->lock */
3357 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3358 bdaddr_t *addr, u8 addr_type)
3359 {
3360 struct hci_conn_params *params;
3361
3362 params = hci_conn_params_lookup(hdev, addr, addr_type);
3363 if (params)
3364 return params;
3365
3366 params = kzalloc(sizeof(*params), GFP_KERNEL);
3367 if (!params) {
3368 bt_dev_err(hdev, "out of memory");
3369 return NULL;
3370 }
3371
3372 bacpy(&params->addr, addr);
3373 params->addr_type = addr_type;
3374
3375 list_add(&params->list, &hdev->le_conn_params);
3376 INIT_LIST_HEAD(&params->action);
3377
3378 params->conn_min_interval = hdev->le_conn_min_interval;
3379 params->conn_max_interval = hdev->le_conn_max_interval;
3380 params->conn_latency = hdev->le_conn_latency;
3381 params->supervision_timeout = hdev->le_supv_timeout;
3382 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3383
3384 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3385
3386 return params;
3387 }
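
/* Illustrative sketch (not part of the original file): the usual
 * lookup-or-create use of the helper above, marking a peer for automatic
 * reconnection.  The caller holds hdev->lock; the peer address variable
 * is hypothetical.
 */
#if 0
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, &peer_addr, ADDR_LE_DEV_PUBLIC);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
#endif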
3388
3389 static void hci_conn_params_free(struct hci_conn_params *params)
3390 {
3391 if (params->conn) {
3392 hci_conn_drop(params->conn);
3393 hci_conn_put(params->conn);
3394 }
3395
3396 list_del(&params->action);
3397 list_del(&params->list);
3398 kfree(params);
3399 }
3400
3401 /* This function requires the caller holds hdev->lock */
3402 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3403 {
3404 struct hci_conn_params *params;
3405
3406 params = hci_conn_params_lookup(hdev, addr, addr_type);
3407 if (!params)
3408 return;
3409
3410 hci_conn_params_free(params);
3411
3412 hci_update_background_scan(hdev);
3413
3414 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3415 }
3416
3417 /* This function requires the caller holds hdev->lock */
3418 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3419 {
3420 struct hci_conn_params *params, *tmp;
3421
3422 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3423 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3424 continue;
3425
3426 /* If trying to establish a one-time connection to a disabled
3427 * device, leave the params, but mark them as just once.
3428 */
3429 if (params->explicit_connect) {
3430 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3431 continue;
3432 }
3433
3434 list_del(&params->list);
3435 kfree(params);
3436 }
3437
3438 BT_DBG("All LE disabled connection parameters were removed");
3439 }
3440
3441 /* This function requires the caller holds hdev->lock */
3442 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3443 {
3444 struct hci_conn_params *params, *tmp;
3445
3446 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3447 hci_conn_params_free(params);
3448
3449 BT_DBG("All LE connection parameters were removed");
3450 }
3451
3452 /* Copy the Identity Address of the controller.
3453 *
3454 * If the controller has a public BD_ADDR, then by default use that one.
3455 * If this is an LE-only controller without a public address, default to
3456 * the static random address.
3457 *
3458 * For debugging purposes it is possible to force controllers with a
3459 * public address to use the static random address instead.
3460 *
3461 * In case BR/EDR has been disabled on a dual-mode controller and
3462 * userspace has configured a static address, then that address
3463 * becomes the identity address instead of the public BR/EDR address.
3464 */
3465 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3466 u8 *bdaddr_type)
3467 {
3468 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3469 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3470 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3471 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3472 bacpy(bdaddr, &hdev->static_addr);
3473 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3474 } else {
3475 bacpy(bdaddr, &hdev->bdaddr);
3476 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3477 }
3478 }
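
/* Illustrative sketch (not part of the original file): typical use of the
 * helper above to fill an own-address field, e.g. when building
 * advertising parameters.  The destination variables are hypothetical.
 */
#if 0
	bdaddr_t id_addr;
	u8 id_addr_type;

	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
#endif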
3479
3480 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3481 {
3482 int i;
3483
3484 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3485 clear_bit(i, hdev->suspend_tasks);
3486
3487 wake_up(&hdev->suspend_wait_q);
3488 }
3489
3490 static int hci_suspend_wait_event(struct hci_dev *hdev)
3491 {
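/* All suspend tasks are complete when no bit remains set;
 * find_first_bit() returns the bitmap size in that case.
 */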
3492 #define WAKE_COND \
3493 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3494 __SUSPEND_NUM_TASKS)
3495
3496 int i;
3497 int ret = wait_event_timeout(hdev->suspend_wait_q,
3498 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3499
3500 if (ret == 0) {
3501 bt_dev_err(hdev, "Timed out waiting for suspend events");
3502 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3503 if (test_bit(i, hdev->suspend_tasks))
3504 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3505 clear_bit(i, hdev->suspend_tasks);
3506 }
3507
3508 ret = -ETIMEDOUT;
3509 } else {
3510 ret = 0;
3511 }
3512
3513 return ret;
3514 }
3515
3516 static void hci_prepare_suspend(struct work_struct *work)
3517 {
3518 struct hci_dev *hdev =
3519 container_of(work, struct hci_dev, suspend_prepare);
3520
3521 hci_dev_lock(hdev);
3522 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3523 hci_dev_unlock(hdev);
3524 }
3525
3526 static int hci_change_suspend_state(struct hci_dev *hdev,
3527 enum suspended_state next)
3528 {
3529 hdev->suspend_state_next = next;
3530 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3531 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3532 return hci_suspend_wait_event(hdev);
3533 }
3534
3535 static void hci_clear_wake_reason(struct hci_dev *hdev)
3536 {
3537 hci_dev_lock(hdev);
3538
3539 hdev->wake_reason = 0;
3540 bacpy(&hdev->wake_addr, BDADDR_ANY);
3541 hdev->wake_addr_type = 0;
3542
3543 hci_dev_unlock(hdev);
3544 }
3545
3546 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3547 void *data)
3548 {
3549 struct hci_dev *hdev =
3550 container_of(nb, struct hci_dev, suspend_notifier);
3551 int ret = 0;
3552 u8 state = BT_RUNNING;
3553
3554 /* If powering down, wait for completion. */
3555 if (mgmt_powering_down(hdev)) {
3556 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3557 ret = hci_suspend_wait_event(hdev);
3558 if (ret)
3559 goto done;
3560 }
3561
3562 /* Suspend notifier should only act on events when powered. */
3563 if (!hdev_is_powered(hdev) ||
3564 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3565 goto done;
3566
3567 if (action == PM_SUSPEND_PREPARE) {
3568 /* Suspend consists of two actions:
3569 * - First, disconnect everything and make the controller not
3570 * connectable (disabling scanning)
3571 * - Second, program event filter/accept list and enable scan
3572 */
3573 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3574 if (!ret)
3575 state = BT_SUSPEND_DISCONNECT;
3576
3577 /* Only configure accept list if disconnect succeeded and wake
3578 * isn't being prevented.
3579 */
3580 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3581 ret = hci_change_suspend_state(hdev,
3582 BT_SUSPEND_CONFIGURE_WAKE);
3583 if (!ret)
3584 state = BT_SUSPEND_CONFIGURE_WAKE;
3585 }
3586
3587 hci_clear_wake_reason(hdev);
3588 mgmt_suspending(hdev, state);
3589
3590 } else if (action == PM_POST_SUSPEND) {
3591 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3592
3593 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3594 hdev->wake_addr_type);
3595 }
3596
3597 done:
3598 /* We always allow suspend even if suspend preparation failed and
3599 * attempt to recover in resume.
3600 */
3601 if (ret)
3602 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3603 action, ret);
3604
3605 return NOTIFY_DONE;
3606 }
3607
3608 /* Alloc HCI device */
3609 struct hci_dev *hci_alloc_dev(void)
3610 {
3611 struct hci_dev *hdev;
3612
3613 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3614 if (!hdev)
3615 return NULL;
3616
3617 if (init_srcu_struct(&hdev->srcu)) {
3618 kfree(hdev);
3619 return NULL;
3620 }
3621
3622 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3623 hdev->esco_type = (ESCO_HV1);
3624 hdev->link_mode = (HCI_LM_ACCEPT);
3625 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3626 hdev->io_capability = 0x03; /* No Input No Output */
3627 hdev->manufacturer = 0xffff; /* Default to internal use */
3628 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3629 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3630 hdev->adv_instance_cnt = 0;
3631 hdev->cur_adv_instance = 0x00;
3632 hdev->adv_instance_timeout = 0;
3633
3634 hdev->advmon_allowlist_duration = 300;
3635 hdev->advmon_no_filter_duration = 500;
3636
3637 hdev->sniff_max_interval = 800;
3638 hdev->sniff_min_interval = 80;
3639
3640 hdev->le_adv_channel_map = 0x07;
3641 hdev->le_adv_min_interval = 0x0800;
3642 hdev->le_adv_max_interval = 0x0800;
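/* Advertising and scan timing values are in 0.625 ms units:
 * 0x0800 = 1.28 s adv interval, 0x0060 = 60 ms scan interval,
 * 0x0030 = 30 ms scan window.
 */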
3643 hdev->le_scan_interval = 0x0060;
3644 hdev->le_scan_window = 0x0030;
3645 hdev->le_scan_int_suspend = 0x0400;
3646 hdev->le_scan_window_suspend = 0x0012;
3647 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3648 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3649 hdev->le_scan_int_connect = 0x0060;
3650 hdev->le_scan_window_connect = 0x0060;
3651 hdev->le_conn_min_interval = 0x0018;
3652 hdev->le_conn_max_interval = 0x0028;
3653 hdev->le_conn_latency = 0x0000;
3654 hdev->le_supv_timeout = 0x002a;
3655 hdev->le_def_tx_len = 0x001b;
3656 hdev->le_def_tx_time = 0x0148;
3657 hdev->le_max_tx_len = 0x001b;
3658 hdev->le_max_tx_time = 0x0148;
3659 hdev->le_max_rx_len = 0x001b;
3660 hdev->le_max_rx_time = 0x0148;
3661 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3662 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3663 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3664 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3665 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3666 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3667 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3668
3669 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3670 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3671 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3672 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3673 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3674 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3675
3676 /* default 1.28 sec page scan */
3677 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3678 hdev->def_page_scan_int = 0x0800;
3679 hdev->def_page_scan_window = 0x0012;
3680
3681 mutex_init(&hdev->lock);
3682 mutex_init(&hdev->req_lock);
3683 mutex_init(&hdev->mgmt_pending_lock);
3684
3685 INIT_LIST_HEAD(&hdev->mgmt_pending);
3686 INIT_LIST_HEAD(&hdev->reject_list);
3687 INIT_LIST_HEAD(&hdev->accept_list);
3688 INIT_LIST_HEAD(&hdev->uuids);
3689 INIT_LIST_HEAD(&hdev->link_keys);
3690 INIT_LIST_HEAD(&hdev->long_term_keys);
3691 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3692 INIT_LIST_HEAD(&hdev->remote_oob_data);
3693 INIT_LIST_HEAD(&hdev->le_accept_list);
3694 INIT_LIST_HEAD(&hdev->le_resolv_list);
3695 INIT_LIST_HEAD(&hdev->le_conn_params);
3696 INIT_LIST_HEAD(&hdev->pend_le_conns);
3697 INIT_LIST_HEAD(&hdev->pend_le_reports);
3698 INIT_LIST_HEAD(&hdev->conn_hash.list);
3699 INIT_LIST_HEAD(&hdev->adv_instances);
3700 INIT_LIST_HEAD(&hdev->blocked_keys);
3701
3702 INIT_WORK(&hdev->rx_work, hci_rx_work);
3703 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3704 INIT_WORK(&hdev->tx_work, hci_tx_work);
3705 INIT_WORK(&hdev->power_on, hci_power_on);
3706 INIT_WORK(&hdev->error_reset, hci_error_reset);
3707 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3708
3709 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3710
3711 skb_queue_head_init(&hdev->rx_q);
3712 skb_queue_head_init(&hdev->cmd_q);
3713 skb_queue_head_init(&hdev->raw_q);
3714
3715 init_waitqueue_head(&hdev->req_wait_q);
3716 init_waitqueue_head(&hdev->suspend_wait_q);
3717
3718 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3719
3720 hci_request_setup(hdev);
3721
3722 hci_init_sysfs(hdev);
3723 discovery_init(hdev);
3724
3725 return hdev;
3726 }
3727 EXPORT_SYMBOL(hci_alloc_dev);
3728
3729 /* Free HCI device */
3730 void hci_free_dev(struct hci_dev *hdev)
3731 {
3732 /* will free via device release */
3733 put_device(&hdev->dev);
3734 }
3735 EXPORT_SYMBOL(hci_free_dev);
3736
3737 /* Register HCI device */
3738 int hci_register_dev(struct hci_dev *hdev)
3739 {
3740 int id, error;
3741
3742 if (!hdev->open || !hdev->close || !hdev->send)
3743 return -EINVAL;
3744
3745 /* Do not allow HCI_AMP devices to register at index 0,
3746 * so the index can be used as the AMP controller ID.
3747 */
3748 switch (hdev->dev_type) {
3749 case HCI_PRIMARY:
3750 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3751 break;
3752 case HCI_AMP:
3753 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3754 break;
3755 default:
3756 return -EINVAL;
3757 }
3758
3759 if (id < 0)
3760 return id;
3761
3762 error = dev_set_name(&hdev->dev, "hci%u", id);
3763 if (error)
3764 return error;
3765
3766 hdev->name = dev_name(&hdev->dev);
3767 hdev->id = id;
3768
3769 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3770
3771 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3772 if (!hdev->workqueue) {
3773 error = -ENOMEM;
3774 goto err;
3775 }
3776
3777 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3778 hdev->name);
3779 if (!hdev->req_workqueue) {
3780 destroy_workqueue(hdev->workqueue);
3781 error = -ENOMEM;
3782 goto err;
3783 }
3784
3785 if (!IS_ERR_OR_NULL(bt_debugfs))
3786 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3787
3788 error = device_add(&hdev->dev);
3789 if (error < 0)
3790 goto err_wqueue;
3791
3792 hci_leds_init(hdev);
3793
3794 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3795 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3796 hdev);
3797 if (hdev->rfkill) {
3798 if (rfkill_register(hdev->rfkill) < 0) {
3799 rfkill_destroy(hdev->rfkill);
3800 hdev->rfkill = NULL;
3801 }
3802 }
3803
3804 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3805 hci_dev_set_flag(hdev, HCI_RFKILLED);
3806
3807 hci_dev_set_flag(hdev, HCI_SETUP);
3808 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3809
3810 if (hdev->dev_type == HCI_PRIMARY) {
3811 /* Assume BR/EDR support until proven otherwise (such as
3812 * through reading the supported features during init).
3813 */
3814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3815 }
3816
3817 write_lock(&hci_dev_list_lock);
3818 list_add(&hdev->list, &hci_dev_list);
3819 write_unlock(&hci_dev_list_lock);
3820
3821 /* Devices that are marked for raw-only usage are unconfigured
3822 * and should not be included in normal operation.
3823 */
3824 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3825 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3826
3827 hci_sock_dev_event(hdev, HCI_DEV_REG);
3828 hci_dev_hold(hdev);
3829
3830 if (!hdev->suspend_notifier.notifier_call &&
3831 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3832 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3833 error = register_pm_notifier(&hdev->suspend_notifier);
3834 if (error)
3835 goto err_wqueue;
3836 }
3837
3838 queue_work(hdev->req_workqueue, &hdev->power_on);
3839
3840 idr_init(&hdev->adv_monitors_idr);
3841
3842 return id;
3843
3844 err_wqueue:
3845 debugfs_remove_recursive(hdev->debugfs);
3846 destroy_workqueue(hdev->workqueue);
3847 destroy_workqueue(hdev->req_workqueue);
3848 err:
3849 ida_simple_remove(&hci_index_ida, hdev->id);
3850
3851 return error;
3852 }
3853 EXPORT_SYMBOL(hci_register_dev);
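
/*
 * A minimal sketch of how a transport driver might use hci_alloc_dev()
 * and hci_register_dev().  The "example_" names are hypothetical; the
 * hook signatures follow the struct hci_dev callbacks that
 * hci_register_dev() checks above (open, close and send are mandatory).
 */
static int example_open(struct hci_dev *hdev)
{
	/* Bring up the transport (USB, UART, ...) here. */
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	/* Shut the transport down and drop any pending frames. */
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Hand the frame to the hardware; consume the skb on success. */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *example_setup(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return ERR_PTR(-ENOMEM);

	hdev->bus = HCI_VIRTUAL;
	hdev->dev_type = HCI_PRIMARY;	/* required by hci_register_dev() */

	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return ERR_PTR(err);
	}

	/* Teardown path: hci_unregister_dev(hdev); hci_free_dev(hdev); */
	return hdev;
}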
3854
3855 /* Unregister HCI device */
3856 void hci_unregister_dev(struct hci_dev *hdev)
3857 {
3858 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3859
3860 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3861
3862 write_lock(&hci_dev_list_lock);
3863 list_del(&hdev->list);
3864 write_unlock(&hci_dev_list_lock);
3865
3866 synchronize_srcu(&hdev->srcu);
3867 cleanup_srcu_struct(&hdev->srcu);
3868
3869 cancel_work_sync(&hdev->rx_work);
3870 cancel_work_sync(&hdev->cmd_work);
3871 cancel_work_sync(&hdev->tx_work);
3872 cancel_work_sync(&hdev->power_on);
3873 cancel_work_sync(&hdev->error_reset);
3874
3875 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3876 hci_suspend_clear_tasks(hdev);
3877 unregister_pm_notifier(&hdev->suspend_notifier);
3878 cancel_work_sync(&hdev->suspend_prepare);
3879 }
3880
3881 hci_dev_do_close(hdev);
3882
3883 if (!test_bit(HCI_INIT, &hdev->flags) &&
3884 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3885 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3886 hci_dev_lock(hdev);
3887 mgmt_index_removed(hdev);
3888 hci_dev_unlock(hdev);
3889 }
3890
3891 /* mgmt_index_removed should take care of emptying the
3892 * pending list */
3893 BUG_ON(!list_empty(&hdev->mgmt_pending));
3894
3895 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3896
3897 if (hdev->rfkill) {
3898 rfkill_unregister(hdev->rfkill);
3899 rfkill_destroy(hdev->rfkill);
3900 }
3901
3902 device_del(&hdev->dev);
3903 /* Actual cleanup is deferred until hci_cleanup_dev(). */
3904 hci_dev_put(hdev);
3905 }
3906 EXPORT_SYMBOL(hci_unregister_dev);
3907
3908 /* Cleanup HCI device */
3909 void hci_cleanup_dev(struct hci_dev *hdev)
3910 {
3911 debugfs_remove_recursive(hdev->debugfs);
3912 kfree_const(hdev->hw_info);
3913 kfree_const(hdev->fw_info);
3914
3915 destroy_workqueue(hdev->workqueue);
3916 destroy_workqueue(hdev->req_workqueue);
3917
3918 hci_dev_lock(hdev);
3919 hci_bdaddr_list_clear(&hdev->reject_list);
3920 hci_bdaddr_list_clear(&hdev->accept_list);
3921 hci_uuids_clear(hdev);
3922 hci_link_keys_clear(hdev);
3923 hci_smp_ltks_clear(hdev);
3924 hci_smp_irks_clear(hdev);
3925 hci_remote_oob_data_clear(hdev);
3926 hci_adv_instances_clear(hdev);
3927 hci_adv_monitors_clear(hdev);
3928 hci_bdaddr_list_clear(&hdev->le_accept_list);
3929 hci_bdaddr_list_clear(&hdev->le_resolv_list);
3930 hci_conn_params_clear_all(hdev);
3931 hci_discovery_filter_clear(hdev);
3932 hci_blocked_keys_clear(hdev);
3933 hci_dev_unlock(hdev);
3934
3935 ida_simple_remove(&hci_index_ida, hdev->id);
3936 }
3937
3938 /* Suspend HCI device */
3939 int hci_suspend_dev(struct hci_dev *hdev)
3940 {
3941 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3942 return 0;
3943 }
3944 EXPORT_SYMBOL(hci_suspend_dev);
3945
3946 /* Resume HCI device */
3947 int hci_resume_dev(struct hci_dev *hdev)
3948 {
3949 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3950 return 0;
3951 }
3952 EXPORT_SYMBOL(hci_resume_dev);
3953
3954 /* Reset HCI device */
3955 int hci_reset_dev(struct hci_dev *hdev)
3956 {
3957 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
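	/*
	 * The injected event follows standard HCI event framing: byte 0 is
	 * the Hardware Error event code (HCI_EV_HARDWARE_ERROR), byte 1 the
	 * parameter length (1) and byte 2 the hardware code (0x00).
	 */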
3958 struct sk_buff *skb;
3959
3960 skb = bt_skb_alloc(3, GFP_ATOMIC);
3961 if (!skb)
3962 return -ENOMEM;
3963
3964 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3965 skb_put_data(skb, hw_err, 3);
3966
3967 /* Send Hardware Error to upper stack */
3968 return hci_recv_frame(hdev, skb);
3969 }
3970 EXPORT_SYMBOL(hci_reset_dev);
3971
3972 /* Receive frame from HCI drivers */
3973 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3974 {
3975 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3976 && !test_bit(HCI_INIT, &hdev->flags))) {
3977 kfree_skb(skb);
3978 return -ENXIO;
3979 }
3980
3981 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3982 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3983 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3984 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3985 kfree_skb(skb);
3986 return -EINVAL;
3987 }
3988
3989 /* Incoming skb */
3990 bt_cb(skb)->incoming = 1;
3991
3992 /* Time stamp */
3993 __net_timestamp(skb);
3994
3995 skb_queue_tail(&hdev->rx_q, skb);
3996 queue_work(hdev->workqueue, &hdev->rx_work);
3997
3998 return 0;
3999 }
4000 EXPORT_SYMBOL(hci_recv_frame);
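
/*
 * A minimal sketch of the driver-side receive path, assuming a raw
 * buffer that already contains a complete HCI event.  The helper name
 * is hypothetical; the skb handling mirrors what hci_reset_dev() does
 * above (bt_skb_alloc(), hci_skb_pkt_type(), skb_put_data()).
 */
static int example_deliver_event(struct hci_dev *hdev,
				 const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, data, len);

	/* hci_recv_frame() queues the skb and owns it from here on. */
	return hci_recv_frame(hdev, skb);
}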
4001
4002 /* Receive diagnostic message from HCI drivers */
4003 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4004 {
4005 /* Mark as diagnostic packet */
4006 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4007
4008 /* Time stamp */
4009 __net_timestamp(skb);
4010
4011 skb_queue_tail(&hdev->rx_q, skb);
4012 queue_work(hdev->workqueue, &hdev->rx_work);
4013
4014 return 0;
4015 }
4016 EXPORT_SYMBOL(hci_recv_diag);
4017
4018 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4019 {
4020 va_list vargs;
4021
4022 va_start(vargs, fmt);
4023 kfree_const(hdev->hw_info);
4024 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4025 va_end(vargs);
4026 }
4027 EXPORT_SYMBOL(hci_set_hw_info);
4028
4029 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4030 {
4031 va_list vargs;
4032
4033 va_start(vargs, fmt);
4034 kfree_const(hdev->fw_info);
4035 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4036 va_end(vargs);
4037 }
4038 EXPORT_SYMBOL(hci_set_fw_info);
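
/*
 * Example usage (hypothetical helper and values): a driver would
 * typically record its hardware and firmware identification once,
 * after querying the controller during setup.  Both helpers take
 * printf-style arguments.
 */
static void example_tag_device(struct hci_dev *hdev, u16 hw_rev, u16 fw_rev)
{
	hci_set_hw_info(hdev, "rev %4.4x", hw_rev);
	hci_set_fw_info(hdev, "build %4.4x", fw_rev);
}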
4039
4040 /* ---- Interface to upper protocols ---- */
4041
4042 int hci_register_cb(struct hci_cb *cb)
4043 {
4044 BT_DBG("%p name %s", cb, cb->name);
4045
4046 mutex_lock(&hci_cb_list_lock);
4047 list_add_tail(&cb->list, &hci_cb_list);
4048 mutex_unlock(&hci_cb_list_lock);
4049
4050 return 0;
4051 }
4052 EXPORT_SYMBOL(hci_register_cb);
4053
4054 int hci_unregister_cb(struct hci_cb *cb)
4055 {
4056 BT_DBG("%p name %s", cb, cb->name);
4057
4058 mutex_lock(&hci_cb_list_lock);
4059 list_del(&cb->list);
4060 mutex_unlock(&hci_cb_list_lock);
4061
4062 return 0;
4063 }
4064 EXPORT_SYMBOL(hci_unregister_cb);
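
/*
 * A sketch of how an upper protocol hooks into connection events, in
 * the style of l2cap/sco/smp.  The callback slots below are assumed to
 * match struct hci_cb; the "example_" identifiers are hypothetical.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	if (!status)
		BT_DBG("hcon %p connected", conn);
}

static void example_disconnect_cfm(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p disconnected, reason 0x%2.2x", conn, reason);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
	.disconnect_cfm	= example_disconnect_cfm,
};

/* Registration is typically done from the protocol's module init:
 *	hci_register_cb(&example_cb);
 * and undone with hci_unregister_cb(&example_cb) on module exit.
 */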
4065
4066 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4067 {
4068 int err;
4069
4070 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4071 skb->len);
4072
4073 /* Time stamp */
4074 __net_timestamp(skb);
4075
4076 /* Send copy to monitor */
4077 hci_send_to_monitor(hdev, skb);
4078
4079 if (atomic_read(&hdev->promisc)) {
4080 /* Send copy to the sockets */
4081 hci_send_to_sock(hdev, skb);
4082 }
4083
4084 /* Get rid of skb owner, prior to sending to the driver. */
4085 skb_orphan(skb);
4086
4087 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4088 kfree_skb(skb);
4089 return;
4090 }
4091
4092 err = hdev->send(hdev, skb);
4093 if (err < 0) {
4094 bt_dev_err(hdev, "sending frame failed (%d)", err);
4095 kfree_skb(skb);
4096 }
4097 }
4098
4099 /* Send HCI command */
4100 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4101 const void *param)
4102 {
4103 struct sk_buff *skb;
4104
4105 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4106
4107 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4108 if (!skb) {
4109 bt_dev_err(hdev, "no memory for command");
4110 return -ENOMEM;
4111 }
4112
4113 /* Stand-alone HCI commands must be flagged as
4114 * single-command requests.
4115 */
4116 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4117
4118 skb_queue_tail(&hdev->cmd_q, skb);
4119 queue_work(hdev->workqueue, &hdev->cmd_work);
4120
4121 return 0;
4122 }
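
/*
 * Example of a fire-and-forget command in the style used by the event
 * and mgmt code: the parameter block is copied into the skb by
 * hci_prepare_cmd(), so a stack variable is fine, and completion is
 * handled asynchronously via the matching Command Complete event.
 * The helper name is hypothetical.
 */
static int example_enable_page_scan(struct hci_dev *hdev)
{
	u8 scan = SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
			    &scan);
}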
4123
4124 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4125 const void *param)
4126 {
4127 struct sk_buff *skb;
4128
4129 if (hci_opcode_ogf(opcode) != 0x3f) {
4130 /* A controller receiving a command shall respond with either
4131 * a Command Status Event or a Command Complete Event.
4132 * Therefore, all standard HCI commands must be sent via the
4133 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4134 * Some vendors do not comply with this rule for vendor-specific
4135 * commands and do not return any event. We want to support
4136 * unresponded commands for such cases only.
4137 */
4138 bt_dev_err(hdev, "unresponded command not supported");
4139 return -EINVAL;
4140 }
4141
4142 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4143 if (!skb) {
4144 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4145 opcode);
4146 return -ENOMEM;
4147 }
4148
4149 hci_send_frame(hdev, skb);
4150
4151 return 0;
4152 }
4153 EXPORT_SYMBOL(__hci_cmd_send);
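
/*
 * Example of the one case __hci_cmd_send() is meant for: a
 * vendor-specific command (OGF 0x3f) for which the controller is known
 * not to send any completion event.  The OCF and parameter below are
 * hypothetical; hci_opcode_pack() builds the 16-bit opcode.
 */
static int example_send_vendor_quirk(struct hci_dev *hdev)
{
	u8 param = 0x01;	/* hypothetical vendor parameter */

	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), &param);
}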
4154
4155 /* Get data from the previously sent command */
4156 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4157 {
4158 struct hci_command_hdr *hdr;
4159
4160 if (!hdev->sent_cmd)
4161 return NULL;
4162
4163 hdr = (void *) hdev->sent_cmd->data;
4164
4165 if (hdr->opcode != cpu_to_le16(opcode))
4166 return NULL;
4167
4168 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4169
4170 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4171 }
4172
4173 /* Send HCI command and wait for the Command Complete event */
4174 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4175 const void *param, u32 timeout)
4176 {
4177 struct sk_buff *skb;
4178
4179 if (!test_bit(HCI_UP, &hdev->flags))
4180 return ERR_PTR(-ENETDOWN);
4181
4182 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4183
4184 hci_req_sync_lock(hdev);
4185 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4186 hci_req_sync_unlock(hdev);
4187
4188 return skb;
4189 }
4190 EXPORT_SYMBOL(hci_cmd_sync);
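
/*
 * Example of a synchronous request: read the local version information
 * and parse the returned parameters.  The opcode and reply struct come
 * from <net/bluetooth/hci.h>; the helper name is hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev, u8 *hci_ver)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (struct hci_rp_read_local_version *)skb->data;
	if (rp->status) {
		kfree_skb(skb);
		return -EIO;
	}

	*hci_ver = rp->hci_ver;

	kfree_skb(skb);
	return 0;
}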
4191
4192 /* Send ACL data */
4193 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4194 {
4195 struct hci_acl_hdr *hdr;
4196 int len = skb->len;
4197
4198 skb_push(skb, HCI_ACL_HDR_SIZE);
4199 skb_reset_transport_header(skb);
4200 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4201 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4202 hdr->dlen = cpu_to_le16(len);
4203 }
4204
4205 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4206 struct sk_buff *skb, __u16 flags)
4207 {
4208 struct hci_conn *conn = chan->conn;
4209 struct hci_dev *hdev = conn->hdev;
4210 struct sk_buff *list;
4211
4212 skb->len = skb_headlen(skb);
4213 skb->data_len = 0;
4214
4215 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4216
4217 switch (hdev->dev_type) {
4218 case HCI_PRIMARY:
4219 hci_add_acl_hdr(skb, conn->handle, flags);
4220 break;
4221 case HCI_AMP:
4222 hci_add_acl_hdr(skb, chan->handle, flags);
4223 break;
4224 default:
4225 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4226 return;
4227 }
4228
4229 list = skb_shinfo(skb)->frag_list;
4230 if (!list) {
4231 /* Non fragmented */
4232 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4233
4234 skb_queue_tail(queue, skb);
4235 } else {
4236 /* Fragmented */
4237 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4238
4239 skb_shinfo(skb)->frag_list = NULL;
4240
4241 /* Queue all fragments atomically. We need to use spin_lock_bh
4242 * here because of 6LoWPAN links, as there this function is
4243 * called from softirq and using normal spin lock could cause
4244 * deadlocks.
4245 */
4246 spin_lock_bh(&queue->lock);
4247
4248 __skb_queue_tail(queue, skb);
4249
4250 flags &= ~ACL_START;
4251 flags |= ACL_CONT;
4252 do {
4253 skb = list; list = list->next;
4254
4255 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4256 hci_add_acl_hdr(skb, conn->handle, flags);
4257
4258 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4259
4260 __skb_queue_tail(queue, skb);
4261 } while (list);
4262
4263 spin_unlock_bh(&queue->lock);
4264 }
4265 }
4266
4267 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4268 {
4269 struct hci_dev *hdev = chan->conn->hdev;
4270
4271 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4272
4273 hci_queue_acl(chan, &chan->data_q, skb, flags);
4274
4275 queue_work(hdev->workqueue, &hdev->tx_work);
4276 }
4277
4278 /* Send SCO data */
4279 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4280 {
4281 struct hci_dev *hdev = conn->hdev;
4282 struct hci_sco_hdr hdr;
4283
4284 BT_DBG("%s len %d", hdev->name, skb->len);
4285
4286 hdr.handle = cpu_to_le16(conn->handle);
4287 hdr.dlen = skb->len;
4288
4289 skb_push(skb, HCI_SCO_HDR_SIZE);
4290 skb_reset_transport_header(skb);
4291 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4292
4293 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4294
4295 skb_queue_tail(&conn->data_q, skb);
4296 queue_work(hdev->workqueue, &hdev->tx_work);
4297 }
4298
4299 /* ---- HCI TX task (outgoing data) ---- */
4300
4301 /* HCI Connection scheduler */
4302 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4303 int *quote)
4304 {
4305 struct hci_conn_hash *h = &hdev->conn_hash;
4306 struct hci_conn *conn = NULL, *c;
4307 unsigned int num = 0, min = ~0;
4308
4309 /* We don't have to lock device here. Connections are always
4310 * added and removed with TX task disabled. */
4311
4312 rcu_read_lock();
4313
4314 list_for_each_entry_rcu(c, &h->list, list) {
4315 if (c->type != type || skb_queue_empty(&c->data_q))
4316 continue;
4317
4318 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4319 continue;
4320
4321 num++;
4322
4323 if (c->sent < min) {
4324 min = c->sent;
4325 conn = c;
4326 }
4327
4328 if (hci_conn_num(hdev, type) == num)
4329 break;
4330 }
4331
4332 rcu_read_unlock();
4333
4334 if (conn) {
4335 int cnt, q;
4336
4337 switch (conn->type) {
4338 case ACL_LINK:
4339 cnt = hdev->acl_cnt;
4340 break;
4341 case SCO_LINK:
4342 case ESCO_LINK:
4343 cnt = hdev->sco_cnt;
4344 break;
4345 case LE_LINK:
4346 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4347 break;
4348 default:
4349 cnt = 0;
4350 bt_dev_err(hdev, "unknown link type %d", conn->type);
4351 }
4352
4353 q = cnt / num;
4354 *quote = q ? q : 1;
4355 } else
4356 *quote = 0;
4357
4358 BT_DBG("conn %p quote %d", conn, *quote);
4359 return conn;
4360 }
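
/*
 * Worked example of the quota computation above (assumed numbers): with
 * hdev->acl_cnt == 8 and three ACL connections that all have queued
 * data, the least-busy connection is picked and *quote becomes
 * 8 / 3 == 2, so at most two packets are sent for it per scheduling
 * pass; a zero quotient is rounded up to 1 so progress is always made.
 */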
4361
4362 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4363 {
4364 struct hci_conn_hash *h = &hdev->conn_hash;
4365 struct hci_conn *c;
4366
4367 bt_dev_err(hdev, "link tx timeout");
4368
4369 rcu_read_lock();
4370
4371 /* Kill stalled connections */
4372 list_for_each_entry_rcu(c, &h->list, list) {
4373 if (c->type == type && c->sent) {
4374 bt_dev_err(hdev, "killing stalled connection %pMR",
4375 &c->dst);
4376 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4377 }
4378 }
4379
4380 rcu_read_unlock();
4381 }
4382
4383 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4384 int *quote)
4385 {
4386 struct hci_conn_hash *h = &hdev->conn_hash;
4387 struct hci_chan *chan = NULL;
4388 unsigned int num = 0, min = ~0, cur_prio = 0;
4389 struct hci_conn *conn;
4390 int cnt, q, conn_num = 0;
4391
4392 BT_DBG("%s", hdev->name);
4393
4394 rcu_read_lock();
4395
4396 list_for_each_entry_rcu(conn, &h->list, list) {
4397 struct hci_chan *tmp;
4398
4399 if (conn->type != type)
4400 continue;
4401
4402 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4403 continue;
4404
4405 conn_num++;
4406
4407 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4408 struct sk_buff *skb;
4409
4410 if (skb_queue_empty(&tmp->data_q))
4411 continue;
4412
4413 skb = skb_peek(&tmp->data_q);
4414 if (skb->priority < cur_prio)
4415 continue;
4416
4417 if (skb->priority > cur_prio) {
4418 num = 0;
4419 min = ~0;
4420 cur_prio = skb->priority;
4421 }
4422
4423 num++;
4424
4425 if (conn->sent < min) {
4426 min = conn->sent;
4427 chan = tmp;
4428 }
4429 }
4430
4431 if (hci_conn_num(hdev, type) == conn_num)
4432 break;
4433 }
4434
4435 rcu_read_unlock();
4436
4437 if (!chan)
4438 return NULL;
4439
4440 switch (chan->conn->type) {
4441 case ACL_LINK:
4442 cnt = hdev->acl_cnt;
4443 break;
4444 case AMP_LINK:
4445 cnt = hdev->block_cnt;
4446 break;
4447 case SCO_LINK:
4448 case ESCO_LINK:
4449 cnt = hdev->sco_cnt;
4450 break;
4451 case LE_LINK:
4452 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4453 break;
4454 default:
4455 cnt = 0;
4456 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4457 }
4458
4459 q = cnt / num;
4460 *quote = q ? q : 1;
4461 BT_DBG("chan %p quote %d", chan, *quote);
4462 return chan;
4463 }
4464
4465 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4466 {
4467 struct hci_conn_hash *h = &hdev->conn_hash;
4468 struct hci_conn *conn;
4469 int num = 0;
4470
4471 BT_DBG("%s", hdev->name);
4472
4473 rcu_read_lock();
4474
4475 list_for_each_entry_rcu(conn, &h->list, list) {
4476 struct hci_chan *chan;
4477
4478 if (conn->type != type)
4479 continue;
4480
4481 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4482 continue;
4483
4484 num++;
4485
4486 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4487 struct sk_buff *skb;
4488
4489 if (chan->sent) {
4490 chan->sent = 0;
4491 continue;
4492 }
4493
4494 if (skb_queue_empty(&chan->data_q))
4495 continue;
4496
4497 skb = skb_peek(&chan->data_q);
4498 if (skb->priority >= HCI_PRIO_MAX - 1)
4499 continue;
4500
4501 skb->priority = HCI_PRIO_MAX - 1;
4502
4503 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4504 skb->priority);
4505 }
4506
4507 if (hci_conn_num(hdev, type) == num)
4508 break;
4509 }
4510
4511 rcu_read_unlock();
4512
4513 }
4514
4515 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4516 {
4517 /* Calculate count of blocks used by this packet */
4518 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4519 }
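
/*
 * Example (assumed numbers): with hdev->block_len == 255 and an ACL
 * frame of 1025 bytes (4-byte ACL header plus 1021 bytes of payload),
 * DIV_ROUND_UP(1021, 255) yields 5 data blocks.
 */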
4520
4521 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4522 {
4523 unsigned long last_tx;
4524
4525 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4526 return;
4527
4528 switch (type) {
4529 case LE_LINK:
4530 last_tx = hdev->le_last_tx;
4531 break;
4532 default:
4533 last_tx = hdev->acl_last_tx;
4534 break;
4535 }
4536
4537 /* tx timeout must be longer than maximum link supervision timeout
4538 * (40.9 seconds)
4539 */
4540 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4541 hci_link_tx_to(hdev, type);
4542 }
4543
4544 /* Schedule SCO */
4545 static void hci_sched_sco(struct hci_dev *hdev)
4546 {
4547 struct hci_conn *conn;
4548 struct sk_buff *skb;
4549 int quote;
4550
4551 BT_DBG("%s", hdev->name);
4552
4553 if (!hci_conn_num(hdev, SCO_LINK))
4554 return;
4555
4556 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4557 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4558 BT_DBG("skb %p len %d", skb, skb->len);
4559 hci_send_frame(hdev, skb);
4560
4561 conn->sent++;
4562 if (conn->sent == ~0)
4563 conn->sent = 0;
4564 }
4565 }
4566 }
4567
4568 static void hci_sched_esco(struct hci_dev *hdev)
4569 {
4570 struct hci_conn *conn;
4571 struct sk_buff *skb;
4572 int quote;
4573
4574 BT_DBG("%s", hdev->name);
4575
4576 if (!hci_conn_num(hdev, ESCO_LINK))
4577 return;
4578
4579 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4580 &quote))) {
4581 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4582 BT_DBG("skb %p len %d", skb, skb->len);
4583 hci_send_frame(hdev, skb);
4584
4585 conn->sent++;
4586 if (conn->sent == ~0)
4587 conn->sent = 0;
4588 }
4589 }
4590 }
4591
4592 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4593 {
4594 unsigned int cnt = hdev->acl_cnt;
4595 struct hci_chan *chan;
4596 struct sk_buff *skb;
4597 int quote;
4598
4599 __check_timeout(hdev, cnt, ACL_LINK);
4600
4601 while (hdev->acl_cnt &&
4602 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4603 u32 priority = (skb_peek(&chan->data_q))->priority;
4604 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4605 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4606 skb->len, skb->priority);
4607
4608 /* Stop if priority has changed */
4609 if (skb->priority < priority)
4610 break;
4611
4612 skb = skb_dequeue(&chan->data_q);
4613
4614 hci_conn_enter_active_mode(chan->conn,
4615 bt_cb(skb)->force_active);
4616
4617 hci_send_frame(hdev, skb);
4618 hdev->acl_last_tx = jiffies;
4619
4620 hdev->acl_cnt--;
4621 chan->sent++;
4622 chan->conn->sent++;
4623
4624 /* Send pending SCO packets right away */
4625 hci_sched_sco(hdev);
4626 hci_sched_esco(hdev);
4627 }
4628 }
4629
4630 if (cnt != hdev->acl_cnt)
4631 hci_prio_recalculate(hdev, ACL_LINK);
4632 }
4633
4634 static void hci_sched_acl_blk(struct hci_dev *hdev)
4635 {
4636 unsigned int cnt = hdev->block_cnt;
4637 struct hci_chan *chan;
4638 struct sk_buff *skb;
4639 int quote;
4640 u8 type;
4641
4642 BT_DBG("%s", hdev->name);
4643
4644 if (hdev->dev_type == HCI_AMP)
4645 type = AMP_LINK;
4646 else
4647 type = ACL_LINK;
4648
4649 __check_timeout(hdev, cnt, type);
4650
4651 while (hdev->block_cnt > 0 &&
4652 (chan = hci_chan_sent(hdev, type, &quote))) {
4653 u32 priority = (skb_peek(&chan->data_q))->priority;
4654 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4655 int blocks;
4656
4657 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4658 skb->len, skb->priority);
4659
4660 /* Stop if priority has changed */
4661 if (skb->priority < priority)
4662 break;
4663
4664 skb = skb_dequeue(&chan->data_q);
4665
4666 blocks = __get_blocks(hdev, skb);
4667 if (blocks > hdev->block_cnt)
4668 return;
4669
4670 hci_conn_enter_active_mode(chan->conn,
4671 bt_cb(skb)->force_active);
4672
4673 hci_send_frame(hdev, skb);
4674 hdev->acl_last_tx = jiffies;
4675
4676 hdev->block_cnt -= blocks;
4677 quote -= blocks;
4678
4679 chan->sent += blocks;
4680 chan->conn->sent += blocks;
4681 }
4682 }
4683
4684 if (cnt != hdev->block_cnt)
4685 hci_prio_recalculate(hdev, type);
4686 }
4687
4688 static void hci_sched_acl(struct hci_dev *hdev)
4689 {
4690 BT_DBG("%s", hdev->name);
4691
4692 /* No ACL link over BR/EDR controller */
4693 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4694 return;
4695
4696 /* No AMP link over AMP controller */
4697 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4698 return;
4699
4700 switch (hdev->flow_ctl_mode) {
4701 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4702 hci_sched_acl_pkt(hdev);
4703 break;
4704
4705 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4706 hci_sched_acl_blk(hdev);
4707 break;
4708 }
4709 }
4710
4711 static void hci_sched_le(struct hci_dev *hdev)
4712 {
4713 struct hci_chan *chan;
4714 struct sk_buff *skb;
4715 int quote, cnt, tmp;
4716
4717 BT_DBG("%s", hdev->name);
4718
4719 if (!hci_conn_num(hdev, LE_LINK))
4720 return;
4721
4722 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4723
4724 __check_timeout(hdev, cnt, LE_LINK);
4725
4726 tmp = cnt;
4727 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4728 u32 priority = (skb_peek(&chan->data_q))->priority;
4729 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4730 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4731 skb->len, skb->priority);
4732
4733 /* Stop if priority has changed */
4734 if (skb->priority < priority)
4735 break;
4736
4737 skb = skb_dequeue(&chan->data_q);
4738
4739 hci_send_frame(hdev, skb);
4740 hdev->le_last_tx = jiffies;
4741
4742 cnt--;
4743 chan->sent++;
4744 chan->conn->sent++;
4745
4746 /* Send pending SCO packets right away */
4747 hci_sched_sco(hdev);
4748 hci_sched_esco(hdev);
4749 }
4750 }
4751
4752 if (hdev->le_pkts)
4753 hdev->le_cnt = cnt;
4754 else
4755 hdev->acl_cnt = cnt;
4756
4757 if (cnt != tmp)
4758 hci_prio_recalculate(hdev, LE_LINK);
4759 }
4760
4761 static void hci_tx_work(struct work_struct *work)
4762 {
4763 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4764 struct sk_buff *skb;
4765
4766 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4767 hdev->sco_cnt, hdev->le_cnt);
4768
4769 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4770 /* Schedule queues and send stuff to HCI driver */
4771 hci_sched_sco(hdev);
4772 hci_sched_esco(hdev);
4773 hci_sched_acl(hdev);
4774 hci_sched_le(hdev);
4775 }
4776
4777 /* Send next queued raw (unknown type) packet */
4778 while ((skb = skb_dequeue(&hdev->raw_q)))
4779 hci_send_frame(hdev, skb);
4780 }
4781
4782 /* ----- HCI RX task (incoming data processing) ----- */
4783
4784 /* ACL data packet */
4785 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4786 {
4787 struct hci_acl_hdr *hdr;
4788 struct hci_conn *conn;
4789 __u16 handle, flags;
4790
4791 hdr = skb_pull_data(skb, sizeof(*hdr));
4792 if (!hdr) {
4793 bt_dev_err(hdev, "ACL packet too small");
4794 goto drop;
4795 }
4796
4797 handle = __le16_to_cpu(hdr->handle);
4798 flags = hci_flags(handle);
4799 handle = hci_handle(handle);
4800
4801 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4802 handle, flags);
4803
4804 hdev->stat.acl_rx++;
4805
4806 hci_dev_lock(hdev);
4807 conn = hci_conn_hash_lookup_handle(hdev, handle);
4808 if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
4809 mgmt_device_connected(hdev, conn, NULL, 0);
4810 hci_dev_unlock(hdev);
4811
4812 if (conn) {
4813 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4814
4815 /* Send to upper protocol */
4816 l2cap_recv_acldata(conn, skb, flags);
4817 return;
4818 } else {
4819 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4820 handle);
4821 }
4822
4823 drop:
4824 kfree_skb(skb);
4825 }
4826
4827 /* SCO data packet */
4828 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4829 {
4830 struct hci_sco_hdr *hdr = (void *) skb->data;
4831 struct hci_conn *conn;
4832 __u16 handle, flags;
4833
4834 skb_pull(skb, HCI_SCO_HDR_SIZE);
4835
4836 handle = __le16_to_cpu(hdr->handle);
4837 flags = hci_flags(handle);
4838 handle = hci_handle(handle);
4839
4840 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4841 handle, flags);
4842
4843 hdev->stat.sco_rx++;
4844
4845 hci_dev_lock(hdev);
4846 conn = hci_conn_hash_lookup_handle(hdev, handle);
4847 hci_dev_unlock(hdev);
4848
4849 if (conn) {
4850 /* Send to upper protocol */
4851 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4852 sco_recv_scodata(conn, skb);
4853 return;
4854 } else {
4855 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4856 handle);
4857 }
4858
4859 kfree_skb(skb);
4860 }
4861
4862 static bool hci_req_is_complete(struct hci_dev *hdev)
4863 {
4864 struct sk_buff *skb;
4865
4866 skb = skb_peek(&hdev->cmd_q);
4867 if (!skb)
4868 return true;
4869
4870 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4871 }
4872
4873 static void hci_resend_last(struct hci_dev *hdev)
4874 {
4875 struct hci_command_hdr *sent;
4876 struct sk_buff *skb;
4877 u16 opcode;
4878
4879 if (!hdev->sent_cmd)
4880 return;
4881
4882 sent = (void *) hdev->sent_cmd->data;
4883 opcode = __le16_to_cpu(sent->opcode);
4884 if (opcode == HCI_OP_RESET)
4885 return;
4886
4887 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4888 if (!skb)
4889 return;
4890
4891 skb_queue_head(&hdev->cmd_q, skb);
4892 queue_work(hdev->workqueue, &hdev->cmd_work);
4893 }
4894
4895 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4896 hci_req_complete_t *req_complete,
4897 hci_req_complete_skb_t *req_complete_skb)
4898 {
4899 struct sk_buff *skb;
4900 unsigned long flags;
4901
4902 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4903
4904 /* If the completed command doesn't match the last one that was
4905 * sent we need to do special handling of it.
4906 */
4907 if (!hci_sent_cmd_data(hdev, opcode)) {
4908 /* Some CSR based controllers generate a spontaneous
4909 * reset complete event during init and any pending
4910 * command will never be completed. In such a case we
4911 * need to resend whatever was the last sent
4912 * command.
4913 */
4914 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4915 hci_resend_last(hdev);
4916
4917 return;
4918 }
4919
4920 /* If we reach this point this event matches the last command sent */
4921 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4922
4923 /* If the command succeeded and there's still more commands in
4924 * this request the request is not yet complete.
4925 */
4926 if (!status && !hci_req_is_complete(hdev))
4927 return;
4928
4929 /* If this was the last command in a request the complete
4930 * callback would be found in hdev->sent_cmd instead of the
4931 * command queue (hdev->cmd_q).
4932 */
4933 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4934 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4935 return;
4936 }
4937
4938 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4939 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4940 return;
4941 }
4942
4943 /* Remove all pending commands belonging to this request */
4944 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4945 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4946 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4947 __skb_queue_head(&hdev->cmd_q, skb);
4948 break;
4949 }
4950
4951 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4952 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4953 else
4954 *req_complete = bt_cb(skb)->hci.req_complete;
4955 dev_kfree_skb_irq(skb);
4956 }
4957 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4958 }
4959
4960 static void hci_rx_work(struct work_struct *work)
4961 {
4962 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4963 struct sk_buff *skb;
4964
4965 BT_DBG("%s", hdev->name);
4966
4967 while ((skb = skb_dequeue(&hdev->rx_q))) {
4968 /* Send copy to monitor */
4969 hci_send_to_monitor(hdev, skb);
4970
4971 if (atomic_read(&hdev->promisc)) {
4972 /* Send copy to the sockets */
4973 hci_send_to_sock(hdev, skb);
4974 }
4975
4976 /* If the device has been opened in HCI_USER_CHANNEL,
4977 * userspace has exclusive access to the device.
4978 * When the device is in HCI_INIT, we still need to pass
4979 * the data packets to the driver in order
4980 * to complete its setup().
4981 */
4982 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4983 !test_bit(HCI_INIT, &hdev->flags)) {
4984 kfree_skb(skb);
4985 continue;
4986 }
4987
4988 if (test_bit(HCI_INIT, &hdev->flags)) {
4989 /* Don't process data packets in this state. */
4990 switch (hci_skb_pkt_type(skb)) {
4991 case HCI_ACLDATA_PKT:
4992 case HCI_SCODATA_PKT:
4993 case HCI_ISODATA_PKT:
4994 kfree_skb(skb);
4995 continue;
4996 }
4997 }
4998
4999 /* Process frame */
5000 switch (hci_skb_pkt_type(skb)) {
5001 case HCI_EVENT_PKT:
5002 BT_DBG("%s Event packet", hdev->name);
5003 hci_event_packet(hdev, skb);
5004 break;
5005
5006 case HCI_ACLDATA_PKT:
5007 BT_DBG("%s ACL data packet", hdev->name);
5008 hci_acldata_packet(hdev, skb);
5009 break;
5010
5011 case HCI_SCODATA_PKT:
5012 BT_DBG("%s SCO data packet", hdev->name);
5013 hci_scodata_packet(hdev, skb);
5014 break;
5015
5016 default:
5017 kfree_skb(skb);
5018 break;
5019 }
5020 }
5021 }
5022
5023 static void hci_cmd_work(struct work_struct *work)
5024 {
5025 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5026 struct sk_buff *skb;
5027
5028 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5029 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5030
5031 /* Send queued commands */
5032 if (atomic_read(&hdev->cmd_cnt)) {
5033 skb = skb_dequeue(&hdev->cmd_q);
5034 if (!skb)
5035 return;
5036
5037 kfree_skb(hdev->sent_cmd);
5038
5039 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5040 if (hdev->sent_cmd) {
5041 if (hci_req_status_pend(hdev))
5042 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5043 atomic_dec(&hdev->cmd_cnt);
5044 hci_send_frame(hdev, skb);
5045 if (test_bit(HCI_RESET, &hdev->flags))
5046 cancel_delayed_work(&hdev->cmd_timer);
5047 else
5048 schedule_delayed_work(&hdev->cmd_timer,
5049 HCI_CMD_TIMEOUT);
5050 } else {
5051 skb_queue_head(&hdev->cmd_q, skb);
5052 queue_work(hdev->workqueue, &hdev->cmd_work);
5053 }
5054 }
5055 }
5056