1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
41
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
52
53 /* HCI device list */
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
56
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
60
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
63
64 /* ---- HCI debugfs entries ---- */
65
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68 {
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 }
77
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80 {
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 err = kstrtobool_from_user(user_buf, count, &enable);
90 if (err)
91 return err;
92
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94 return -EALREADY;
95
96 hci_req_sync_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_sync_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 kfree_skb(skb);
109
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112 return count;
113 }
114
115 static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120 };
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124 {
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136 {
137 struct hci_dev *hdev = file->private_data;
138 bool enable;
139 int err;
140
141 err = kstrtobool_from_user(user_buf, count, &enable);
142 if (err)
143 return err;
144
145 /* When the diagnostic flags are not persistent and the transport
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
149 */
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
153 goto done;
154
155 hci_req_sync_lock(hdev);
156 err = hdev->set_diag(hdev, enable);
157 hci_req_sync_unlock(hdev);
158
159 if (err < 0)
160 return err;
161
162 done:
163 if (enable)
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165 else
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167
168 return count;
169 }
170
171 static const struct file_operations vendor_diag_fops = {
172 .open = simple_open,
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
176 };
177
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
179 {
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181 &dut_mode_fops);
182
183 if (hdev->set_diag)
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185 &vendor_diag_fops);
186 }
187
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
189 {
190 BT_DBG("%s %ld", req->hdev->name, opt);
191
192 /* Reset device */
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
195 return 0;
196 }
197
198 static void bredr_init(struct hci_request *req)
199 {
200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
202 /* Read Local Supported Features */
203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
204
205 /* Read Local Version */
206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207
208 /* Read BD Address */
209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
210 }
211
212 static void amp_init1(struct hci_request *req)
213 {
214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
215
216 /* Read Local Version */
217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221
222 /* Read Local AMP Info */
223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
224
225 /* Read Data Blk size */
226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
227
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
233 }
234
235 static int amp_init2(struct hci_request *req)
236 {
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this so it's placed conditionally in the second
239 * stage init.
240 */
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
243
244 return 0;
245 }
246
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
248 {
249 struct hci_dev *hdev = req->hdev;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Reset */
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255 hci_reset_req(req, 0);
256
257 switch (hdev->dev_type) {
258 case HCI_PRIMARY:
259 bredr_init(req);
260 break;
261 case HCI_AMP:
262 amp_init1(req);
263 break;
264 default:
265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269 return 0;
270 }
271
272 static void bredr_setup(struct hci_request *req)
273 {
274 __le16 param;
275 __u8 flt_type;
276
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279
280 /* Read Class of Device */
281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282
283 /* Read Local Name */
284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285
286 /* Read Voice Setting */
287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298
299 /* Connection accept timeout ~20 secs */
300 param = cpu_to_le16(0x7d00);
301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
302 }
303
304 static void le_setup(struct hci_request *req)
305 {
306 struct hci_dev *hdev = req->hdev;
307
308 /* Read LE Buffer Size */
309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
310
311 /* Read LE Local Supported Features */
312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
313
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
320 }
321
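/* Build the page 1 event mask from the controller's LMP features and
 * supported commands (with a reduced baseline for LE-only controllers)
 * and queue the Set Event Mask command.
 */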
322 static void hci_setup_event_mask(struct hci_request *req)
323 {
324 struct hci_dev *hdev = req->hdev;
325
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328 * command otherwise.
329 */
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331
332 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
333 * any event mask for pre 1.2 devices.
334 */
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336 return;
337
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
340 } else {
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
346
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
350 */
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
355 }
356
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
359 */
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information
362 * Complete
363 */
364
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
368 }
369 }
370
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
374
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
381 }
382
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
385
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
391
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
394
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
409 */
410 }
411
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
414
415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
416 }
417
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
419 {
420 struct hci_dev *hdev = req->hdev;
421
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
424
425 if (lmp_bredr_capable(hdev))
426 bredr_setup(req);
427 else
428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
429
430 if (lmp_le_capable(hdev))
431 le_setup(req);
432
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
435 *
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
440 */
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
444
445 if (lmp_ssp_capable(hdev)) {
446 /* When SSP is available, then the host features page
447 * should also be available. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
451 */
452 hdev->max_page = 0x01;
453
454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
455 u8 mode = 0x01;
456
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
459 } else {
460 struct hci_cp_write_eir cp;
461
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
464
465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
466 }
467 }
468
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
471 u8 mode;
472
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
475 * events.
476 */
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480 }
481
482 if (lmp_inq_tx_pwr_capable(hdev))
483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
484
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
487
488 cp.page = 0x01;
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490 sizeof(cp), &cp);
491 }
492
493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
494 u8 enable = 1;
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496 &enable);
497 }
498
499 return 0;
500 }
501
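/* Derive the default link policy (role switch, hold, sniff, park) from
 * the controller's LMP features and program it with Write Default Link
 * Policy Settings.
 */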
502 static void hci_setup_link_policy(struct hci_request *req)
503 {
504 struct hci_dev *hdev = req->hdev;
505 struct hci_cp_write_def_link_policy cp;
506 u16 link_policy = 0;
507
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
516
517 cp.policy = cpu_to_le16(link_policy);
518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
519 }
520
521 static void hci_set_le_support(struct hci_request *req)
522 {
523 struct hci_dev *hdev = req->hdev;
524 struct hci_cp_write_le_host_supported cp;
525
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
528 return;
529
530 memset(&cp, 0, sizeof(cp));
531
532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
533 cp.le = 0x01;
534 cp.simul = 0x00;
535 }
536
537 if (cp.le != lmp_host_le_capable(hdev))
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539 &cp);
540 }
541
542 static void hci_set_event_mask_page_2(struct hci_request *req)
543 {
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546 bool changed = false;
547
548 /* If Connectionless Peripheral Broadcast central role is supported
549 * enable all necessary events for it.
550 */
551 if (lmp_cpb_central_capable(hdev)) {
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Peripheral Page Response Timeout */
555 events[2] |= 0x20; /* CPB Channel Map Change */
556 changed = true;
557 }
558
559 /* If Connectionless Peripheral Broadcast peripheral role is supported
560 * enable all necessary events for it.
561 */
562 if (lmp_cpb_peripheral_capable(hdev)) {
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CPB Receive */
565 events[2] |= 0x04; /* CPB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
567 changed = true;
568 }
569
570 /* Enable Authenticated Payload Timeout Expired event if supported */
571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
572 events[2] |= 0x80;
573 changed = true;
574 }
575
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
581 */
582 if (changed)
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
585 }
586
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
588 {
589 struct hci_dev *hdev = req->hdev;
590 u8 p;
591
592 hci_setup_event_mask(req);
593
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596 struct hci_cp_read_stored_link_key cp;
597
598 bacpy(&cp.bdaddr, BDADDR_ANY);
599 cp.read_all = 0x01;
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601 }
602
603 if (hdev->commands[5] & 0x10)
604 hci_setup_link_policy(req);
605
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
616 */
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619
620 if (lmp_le_capable(hdev)) {
621 u8 events[8];
622
623 memset(events, 0, sizeof(events));
624
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
627
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
630 */
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection
633 * Parameter Request
634 */
635
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
638 */
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
641
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
644 */
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection
647 * Complete
648 */
649
650 /* If the controller supports Extended Scanner Filter
651 * Policies, enable the corresponding event.
652 */
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising
655 * Report
656 */
657
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
660 */
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection
663 * Algorithm
664 */
665
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
668 */
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
671
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
674 */
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
677
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
680 */
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update
683 * Complete
684 */
685
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
688 */
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used
691 * Features Complete
692 */
693
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
696 */
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
700 */
701
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
704 */
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
707
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
710 */
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
713
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
717 */
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising
720 * Report
721 */
722
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
725 */
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set
728 * Terminated
729 */
730
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
732 events);
733
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* HCI TS spec forbids mixing of legacy and extended
737 * advertising commands wherein READ_ADV_TX_POWER is
738 * also included. So do not call it if extended adv
739 * is supported otherwise controller will return
740 * COMMAND_DISALLOWED for extended commands.
741 */
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
743 }
744
745 if ((hdev->commands[38] & 0x80) &&
746 !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
747 /* Read LE Min/Max Tx Power */
748 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
749 0, NULL);
750 }
751
752 if (hdev->commands[26] & 0x40) {
753 /* Read LE Accept List Size */
754 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
755 0, NULL);
756 }
757
758 if (hdev->commands[26] & 0x80) {
759 /* Clear LE Accept List */
760 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
761 }
762
763 if (hdev->commands[34] & 0x40) {
764 /* Read LE Resolving List Size */
765 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
766 0, NULL);
767 }
768
769 if (hdev->commands[34] & 0x20) {
770 /* Clear LE Resolving List */
771 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
772 }
773
774 if (hdev->commands[35] & 0x04) {
775 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
776
777 /* Set RPA timeout */
778 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
779 &rpa_timeout);
780 }
781
782 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
783 /* Read LE Maximum Data Length */
784 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
785
786 /* Read LE Suggested Default Data Length */
787 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
788 }
789
790 if (ext_adv_capable(hdev)) {
791 /* Read LE Number of Supported Advertising Sets */
792 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
793 0, NULL);
794 }
795
796 hci_set_le_support(req);
797 }
798
799 /* Read features beyond page 1 if available */
800 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
801 struct hci_cp_read_local_ext_features cp;
802
803 cp.page = p;
804 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
805 sizeof(cp), &cp);
806 }
807
808 return 0;
809 }
810
811 static int hci_init4_req(struct hci_request *req, unsigned long opt)
812 {
813 struct hci_dev *hdev = req->hdev;
814
815 /* Some Broadcom based Bluetooth controllers do not support the
816 * Delete Stored Link Key command. They are clearly indicating its
817 * absence in the bit mask of supported commands.
818 *
819 * Check the supported commands and only if the command is marked
820 * as supported send it. If not supported assume that the controller
821 * does not have actual support for stored link keys which makes this
822 * command redundant anyway.
823 *
824 * Some controllers indicate that they support handling deleting
825 * stored link keys, but they don't. The quirk lets a driver
826 * just disable this command.
827 */
828 if (hdev->commands[6] & 0x80 &&
829 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
830 struct hci_cp_delete_stored_link_key cp;
831
832 bacpy(&cp.bdaddr, BDADDR_ANY);
833 cp.delete_all = 0x01;
834 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
835 sizeof(cp), &cp);
836 }
837
838 /* Set event mask page 2 if the HCI command for it is supported */
839 if (hdev->commands[22] & 0x04)
840 hci_set_event_mask_page_2(req);
841
842 /* Read local codec list if the HCI command is supported */
843 if (hdev->commands[29] & 0x20)
844 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
845
846 /* Read local pairing options if the HCI command is supported */
847 if (hdev->commands[41] & 0x08)
848 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
849
850 /* Get MWS transport configuration if the HCI command is supported */
851 if (hdev->commands[30] & 0x08)
852 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
853
854 /* Check for Synchronization Train support */
855 if (lmp_sync_train_capable(hdev))
856 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
857
858 /* Enable Secure Connections if supported and configured */
859 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
860 bredr_sc_enabled(hdev)) {
861 u8 support = 0x01;
862
863 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
864 sizeof(support), &support);
865 }
866
867 /* Set erroneous data reporting, if supported, to match the wideband
868 * speech setting value
869 */
870 if (hdev->commands[18] & 0x08 &&
871 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
872 bool enabled = hci_dev_test_flag(hdev,
873 HCI_WIDEBAND_SPEECH_ENABLED);
874
875 if (enabled !=
876 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
877 struct hci_cp_write_def_err_data_reporting cp;
878
879 cp.err_data_reporting = enabled ?
880 ERR_DATA_REPORTING_ENABLED :
881 ERR_DATA_REPORTING_DISABLED;
882
883 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
884 sizeof(cp), &cp);
885 }
886 }
887
888 /* Set Suggested Default Data Length to maximum if supported */
889 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
890 struct hci_cp_le_write_def_data_len cp;
891
892 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
893 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
894 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
895 }
896
897 /* Set Default PHY parameters if command is supported */
898 if (hdev->commands[35] & 0x20) {
899 struct hci_cp_le_set_default_phy cp;
900
901 cp.all_phys = 0x00;
902 cp.tx_phys = hdev->le_tx_def_phys;
903 cp.rx_phys = hdev->le_rx_def_phys;
904
905 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
906 }
907
908 return 0;
909 }
910
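/* Run the staged controller initialization: stages 1 and 2 for all
 * controllers, stages 3 and 4 only for HCI_PRIMARY. Also creates the
 * debugfs entries while the controller is in setup or config state.
 */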
911 static int __hci_init(struct hci_dev *hdev)
912 {
913 int err;
914
915 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
916 if (err < 0)
917 return err;
918
919 if (hci_dev_test_flag(hdev, HCI_SETUP))
920 hci_debugfs_create_basic(hdev);
921
922 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
923 if (err < 0)
924 return err;
925
926 /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and dual-mode
927 * BR/EDR/LE controllers. AMP controllers only need the
928 * first two stages of init.
929 */
930 if (hdev->dev_type != HCI_PRIMARY)
931 return 0;
932
933 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
934 if (err < 0)
935 return err;
936
937 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
938 if (err < 0)
939 return err;
940
941 /* This function is only called when the controller is actually in
942 * configured state. When the controller is marked as unconfigured,
943 * this initialization procedure is not run.
944 *
945 * It means that it is possible that a controller runs through its
946 * setup phase and then discovers missing settings. If that is the
947 * case, then this function will not be called. It then will only
948 * be called during the config phase.
949 *
950 * So only when in setup phase or config phase, create the debugfs
951 * entries and register the SMP channels.
952 */
953 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
954 !hci_dev_test_flag(hdev, HCI_CONFIG))
955 return 0;
956
957 hci_debugfs_create_common(hdev);
958
959 if (lmp_bredr_capable(hdev))
960 hci_debugfs_create_bredr(hdev);
961
962 if (lmp_le_capable(hdev))
963 hci_debugfs_create_le(hdev);
964
965 return 0;
966 }
967
968 static int hci_init0_req(struct hci_request *req, unsigned long opt)
969 {
970 struct hci_dev *hdev = req->hdev;
971
972 BT_DBG("%s %ld", hdev->name, opt);
973
974 /* Reset */
975 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
976 hci_reset_req(req, 0);
977
978 /* Read Local Version */
979 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
980
981 /* Read BD Address */
982 if (hdev->set_bdaddr)
983 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
984
985 return 0;
986 }
987
988 static int __hci_unconf_init(struct hci_dev *hdev)
989 {
990 int err;
991
992 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
993 return 0;
994
995 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
996 if (err < 0)
997 return err;
998
999 if (hci_dev_test_flag(hdev, HCI_SETUP))
1000 hci_debugfs_create_basic(hdev);
1001
1002 return 0;
1003 }
1004
1005 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1006 {
1007 __u8 scan = opt;
1008
1009 BT_DBG("%s %x", req->hdev->name, scan);
1010
1011 /* Inquiry and Page scans */
1012 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1013 return 0;
1014 }
1015
1016 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1017 {
1018 __u8 auth = opt;
1019
1020 BT_DBG("%s %x", req->hdev->name, auth);
1021
1022 /* Authentication */
1023 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1024 return 0;
1025 }
1026
1027 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1028 {
1029 __u8 encrypt = opt;
1030
1031 BT_DBG("%s %x", req->hdev->name, encrypt);
1032
1033 /* Encryption */
1034 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1035 return 0;
1036 }
1037
1038 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1039 {
1040 __le16 policy = cpu_to_le16(opt);
1041
1042 BT_DBG("%s %x", req->hdev->name, policy);
1043
1044 /* Default link policy */
1045 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1046 return 0;
1047 }
1048
1049 /* Get HCI device by index.
1050 * Device is held on return. */
1051 struct hci_dev *hci_dev_get(int index)
1052 {
1053 struct hci_dev *hdev = NULL, *d;
1054
1055 BT_DBG("%d", index);
1056
1057 if (index < 0)
1058 return NULL;
1059
1060 read_lock(&hci_dev_list_lock);
1061 list_for_each_entry(d, &hci_dev_list, list) {
1062 if (d->id == index) {
1063 hdev = hci_dev_hold(d);
1064 break;
1065 }
1066 }
1067 read_unlock(&hci_dev_list_lock);
1068 return hdev;
1069 }
1070
1071 /* ---- Inquiry support ---- */
1072
1073 bool hci_discovery_active(struct hci_dev *hdev)
1074 {
1075 struct discovery_state *discov = &hdev->discovery;
1076
1077 switch (discov->state) {
1078 case DISCOVERY_FINDING:
1079 case DISCOVERY_RESOLVING:
1080 return true;
1081
1082 default:
1083 return false;
1084 }
1085 }
1086
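/* Record the new discovery state and, on an actual transition, update
 * background scanning and send the corresponding mgmt Discovering event.
 */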
1087 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1088 {
1089 int old_state = hdev->discovery.state;
1090
1091 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1092
1093 if (old_state == state)
1094 return;
1095
1096 hdev->discovery.state = state;
1097
1098 switch (state) {
1099 case DISCOVERY_STOPPED:
1100 hci_update_background_scan(hdev);
1101
1102 if (old_state != DISCOVERY_STARTING)
1103 mgmt_discovering(hdev, 0);
1104 break;
1105 case DISCOVERY_STARTING:
1106 break;
1107 case DISCOVERY_FINDING:
1108 mgmt_discovering(hdev, 1);
1109 break;
1110 case DISCOVERY_RESOLVING:
1111 break;
1112 case DISCOVERY_STOPPING:
1113 break;
1114 }
1115 }
1116
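/* Free every entry in the inquiry cache and reinitialize the unknown
 * and resolve lists. Callers in this file hold hdev->lock.
 */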
1117 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1118 {
1119 struct discovery_state *cache = &hdev->discovery;
1120 struct inquiry_entry *p, *n;
1121
1122 list_for_each_entry_safe(p, n, &cache->all, all) {
1123 list_del(&p->all);
1124 kfree(p);
1125 }
1126
1127 INIT_LIST_HEAD(&cache->unknown);
1128 INIT_LIST_HEAD(&cache->resolve);
1129 }
1130
1131 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1132 bdaddr_t *bdaddr)
1133 {
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *e;
1136
1137 BT_DBG("cache %p, %pMR", cache, bdaddr);
1138
1139 list_for_each_entry(e, &cache->all, all) {
1140 if (!bacmp(&e->data.bdaddr, bdaddr))
1141 return e;
1142 }
1143
1144 return NULL;
1145 }
1146
1147 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1148 bdaddr_t *bdaddr)
1149 {
1150 struct discovery_state *cache = &hdev->discovery;
1151 struct inquiry_entry *e;
1152
1153 BT_DBG("cache %p, %pMR", cache, bdaddr);
1154
1155 list_for_each_entry(e, &cache->unknown, list) {
1156 if (!bacmp(&e->data.bdaddr, bdaddr))
1157 return e;
1158 }
1159
1160 return NULL;
1161 }
1162
1163 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1164 bdaddr_t *bdaddr,
1165 int state)
1166 {
1167 struct discovery_state *cache = &hdev->discovery;
1168 struct inquiry_entry *e;
1169
1170 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1171
1172 list_for_each_entry(e, &cache->resolve, list) {
1173 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1174 return e;
1175 if (!bacmp(&e->data.bdaddr, bdaddr))
1176 return e;
1177 }
1178
1179 return NULL;
1180 }
1181
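/* Re-insert the entry into the resolve list so that name resolution is
 * attempted for the strongest (smallest absolute RSSI) entries first.
 */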
1182 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1183 struct inquiry_entry *ie)
1184 {
1185 struct discovery_state *cache = &hdev->discovery;
1186 struct list_head *pos = &cache->resolve;
1187 struct inquiry_entry *p;
1188
1189 list_del(&ie->list);
1190
1191 list_for_each_entry(p, &cache->resolve, list) {
1192 if (p->name_state != NAME_PENDING &&
1193 abs(p->data.rssi) >= abs(ie->data.rssi))
1194 break;
1195 pos = &p->list;
1196 }
1197
1198 list_add(&ie->list, pos);
1199 }
1200
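/* Add a new inquiry cache entry or refresh an existing one and return
 * MGMT_DEV_FOUND_* flags describing it (e.g. whether name confirmation
 * is needed or only legacy pairing is possible).
 */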
1201 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1202 bool name_known)
1203 {
1204 struct discovery_state *cache = &hdev->discovery;
1205 struct inquiry_entry *ie;
1206 u32 flags = 0;
1207
1208 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1209
1210 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1211
1212 if (!data->ssp_mode)
1213 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1214
1215 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1216 if (ie) {
1217 if (!ie->data.ssp_mode)
1218 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1219
1220 if (ie->name_state == NAME_NEEDED &&
1221 data->rssi != ie->data.rssi) {
1222 ie->data.rssi = data->rssi;
1223 hci_inquiry_cache_update_resolve(hdev, ie);
1224 }
1225
1226 goto update;
1227 }
1228
1229 /* Entry not in the cache. Add new one. */
1230 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1231 if (!ie) {
1232 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1233 goto done;
1234 }
1235
1236 list_add(&ie->all, &cache->all);
1237
1238 if (name_known) {
1239 ie->name_state = NAME_KNOWN;
1240 } else {
1241 ie->name_state = NAME_NOT_KNOWN;
1242 list_add(&ie->list, &cache->unknown);
1243 }
1244
1245 update:
1246 if (name_known && ie->name_state != NAME_KNOWN &&
1247 ie->name_state != NAME_PENDING) {
1248 ie->name_state = NAME_KNOWN;
1249 list_del(&ie->list);
1250 }
1251
1252 memcpy(&ie->data, data, sizeof(*data));
1253 ie->timestamp = jiffies;
1254 cache->timestamp = jiffies;
1255
1256 if (ie->name_state == NAME_NOT_KNOWN)
1257 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1258
1259 done:
1260 return flags;
1261 }
1262
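/* Copy up to num cached entries into buf as struct inquiry_info records
 * and return how many were copied. Called with hdev->lock held.
 */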
1263 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1264 {
1265 struct discovery_state *cache = &hdev->discovery;
1266 struct inquiry_info *info = (struct inquiry_info *) buf;
1267 struct inquiry_entry *e;
1268 int copied = 0;
1269
1270 list_for_each_entry(e, &cache->all, all) {
1271 struct inquiry_data *data = &e->data;
1272
1273 if (copied >= num)
1274 break;
1275
1276 bacpy(&info->bdaddr, &data->bdaddr);
1277 info->pscan_rep_mode = data->pscan_rep_mode;
1278 info->pscan_period_mode = data->pscan_period_mode;
1279 info->pscan_mode = data->pscan_mode;
1280 memcpy(info->dev_class, data->dev_class, 3);
1281 info->clock_offset = data->clock_offset;
1282
1283 info++;
1284 copied++;
1285 }
1286
1287 BT_DBG("cache %p, copied %d", cache, copied);
1288 return copied;
1289 }
1290
1291 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1292 {
1293 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1294 struct hci_dev *hdev = req->hdev;
1295 struct hci_cp_inquiry cp;
1296
1297 BT_DBG("%s", hdev->name);
1298
1299 if (test_bit(HCI_INQUIRY, &hdev->flags))
1300 return 0;
1301
1302 /* Start Inquiry */
1303 memcpy(&cp.lap, &ir->lap, 3);
1304 cp.length = ir->length;
1305 cp.num_rsp = ir->num_rsp;
1306 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1307
1308 return 0;
1309 }
1310
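/* Handle the HCIINQUIRY ioctl: start a new inquiry if the cache is
 * stale or a flush was requested, wait for it to finish and copy the
 * cached results back to user space.
 */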
1311 int hci_inquiry(void __user *arg)
1312 {
1313 __u8 __user *ptr = arg;
1314 struct hci_inquiry_req ir;
1315 struct hci_dev *hdev;
1316 int err = 0, do_inquiry = 0, max_rsp;
1317 long timeo;
1318 __u8 *buf;
1319
1320 if (copy_from_user(&ir, ptr, sizeof(ir)))
1321 return -EFAULT;
1322
1323 hdev = hci_dev_get(ir.dev_id);
1324 if (!hdev)
1325 return -ENODEV;
1326
1327 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1328 err = -EBUSY;
1329 goto done;
1330 }
1331
1332 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1333 err = -EOPNOTSUPP;
1334 goto done;
1335 }
1336
1337 if (hdev->dev_type != HCI_PRIMARY) {
1338 err = -EOPNOTSUPP;
1339 goto done;
1340 }
1341
1342 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1343 err = -EOPNOTSUPP;
1344 goto done;
1345 }
1346
1347 /* Restrict maximum inquiry length to 60 seconds */
1348 if (ir.length > 60) {
1349 err = -EINVAL;
1350 goto done;
1351 }
1352
1353 hci_dev_lock(hdev);
1354 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1355 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1356 hci_inquiry_cache_flush(hdev);
1357 do_inquiry = 1;
1358 }
1359 hci_dev_unlock(hdev);
1360
1361 timeo = ir.length * msecs_to_jiffies(2000);
1362
1363 if (do_inquiry) {
1364 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1365 timeo, NULL);
1366 if (err < 0)
1367 goto done;
1368
1369 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1370 * cleared). If it is interrupted by a signal, return -EINTR.
1371 */
1372 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1373 TASK_INTERRUPTIBLE)) {
1374 err = -EINTR;
1375 goto done;
1376 }
1377 }
1378
1379 /* For an unlimited number of responses we will use a buffer with
1380 * 255 entries
1381 */
1382 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1383
1384 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1385 * then copy it to user space.
1386 */
1387 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1388 if (!buf) {
1389 err = -ENOMEM;
1390 goto done;
1391 }
1392
1393 hci_dev_lock(hdev);
1394 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1395 hci_dev_unlock(hdev);
1396
1397 BT_DBG("num_rsp %d", ir.num_rsp);
1398
1399 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1400 ptr += sizeof(ir);
1401 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1402 ir.num_rsp))
1403 err = -EFAULT;
1404 } else
1405 err = -EFAULT;
1406
1407 kfree(buf);
1408
1409 done:
1410 hci_dev_put(hdev);
1411 return err;
1412 }
1413
1414 /**
1415 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1416 * (BD_ADDR) for a HCI device from
1417 * a firmware node property.
1418 * @hdev: The HCI device
1419 *
1420 * Search the firmware node for 'local-bd-address'.
1421 *
1422 * All-zero BD addresses are rejected, because those could be properties
1423 * that exist in the firmware tables, but were not updated by the firmware. For
1424 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1425 */
1426 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1427 {
1428 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1429 bdaddr_t ba;
1430 int ret;
1431
1432 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1433 (u8 *)&ba, sizeof(ba));
1434 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1435 return;
1436
1437 bacpy(&hdev->public_addr, &ba);
1438 }
1439
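/* Bring the controller up: open the transport, run the vendor setup
 * callback and the init stages, and announce the result; on failure
 * everything is torn back down before returning.
 */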
1440 static int hci_dev_do_open(struct hci_dev *hdev)
1441 {
1442 int ret = 0;
1443
1444 BT_DBG("%s %p", hdev->name, hdev);
1445
1446 hci_req_sync_lock(hdev);
1447
1448 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1449 ret = -ENODEV;
1450 goto done;
1451 }
1452
1453 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1454 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1455 /* Check for rfkill but allow the HCI setup stage to
1456 * proceed (which in itself doesn't cause any RF activity).
1457 */
1458 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1459 ret = -ERFKILL;
1460 goto done;
1461 }
1462
1463 /* Check for valid public address or a configured static
1464 * random address, but let the HCI setup proceed to
1465 * be able to determine if there is a public address
1466 * or not.
1467 *
1468 * In case of user channel usage, it is not important
1469 * if a public address or static random address is
1470 * available.
1471 *
1472 * This check is only valid for BR/EDR controllers
1473 * since AMP controllers do not have an address.
1474 */
1475 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1476 hdev->dev_type == HCI_PRIMARY &&
1477 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1478 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1479 ret = -EADDRNOTAVAIL;
1480 goto done;
1481 }
1482 }
1483
1484 if (test_bit(HCI_UP, &hdev->flags)) {
1485 ret = -EALREADY;
1486 goto done;
1487 }
1488
1489 if (hdev->open(hdev)) {
1490 ret = -EIO;
1491 goto done;
1492 }
1493
1494 set_bit(HCI_RUNNING, &hdev->flags);
1495 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1496
1497 atomic_set(&hdev->cmd_cnt, 1);
1498 set_bit(HCI_INIT, &hdev->flags);
1499
1500 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1501 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1502 bool invalid_bdaddr;
1503
1504 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1505
1506 if (hdev->setup)
1507 ret = hdev->setup(hdev);
1508
1509 /* The transport driver can set the quirk to mark the
1510 * BD_ADDR invalid before creating the HCI device or in
1511 * its setup callback.
1512 */
1513 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1514 &hdev->quirks);
1515
1516 if (ret)
1517 goto setup_failed;
1518
1519 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1520 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1521 hci_dev_get_bd_addr_from_property(hdev);
1522
1523 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1524 hdev->set_bdaddr) {
1525 ret = hdev->set_bdaddr(hdev,
1526 &hdev->public_addr);
1527
1528 /* If setting of the BD_ADDR from the device
1529 * property succeeds, then treat the address
1530 * as valid even if the invalid BD_ADDR
1531 * quirk indicates otherwise.
1532 */
1533 if (!ret)
1534 invalid_bdaddr = false;
1535 }
1536 }
1537
1538 setup_failed:
1539 /* The transport driver can set these quirks before
1540 * creating the HCI device or in its setup callback.
1541 *
1542 * For the invalid BD_ADDR quirk it is possible that
1543 * it becomes a valid address if the bootloader does
1544 * provide it (see above).
1545 *
1546 * In case any of them is set, the controller has to
1547 * start up as unconfigured.
1548 */
1549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1550 invalid_bdaddr)
1551 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1552
1553 /* For an unconfigured controller it is required to
1554 * read at least the version information provided by
1555 * the Read Local Version Information command.
1556 *
1557 * If the set_bdaddr driver callback is provided, then
1558 * also the original Bluetooth public device address
1559 * will be read using the Read BD Address command.
1560 */
1561 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1562 ret = __hci_unconf_init(hdev);
1563 }
1564
1565 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1566 /* If public address change is configured, ensure that
1567 * the address gets programmed. If the driver does not
1568 * support changing the public address, fail the power
1569 * on procedure.
1570 */
1571 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1572 hdev->set_bdaddr)
1573 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1574 else
1575 ret = -EADDRNOTAVAIL;
1576 }
1577
1578 if (!ret) {
1579 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581 ret = __hci_init(hdev);
1582 if (!ret && hdev->post_init)
1583 ret = hdev->post_init(hdev);
1584 }
1585 }
1586
1587 /* If the HCI Reset command is clearing all diagnostic settings,
1588 * then they need to be reprogrammed after the init procedure
1589 * completed.
1590 */
1591 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1592 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1594 ret = hdev->set_diag(hdev, true);
1595
1596 msft_do_open(hdev);
1597 aosp_do_open(hdev);
1598
1599 clear_bit(HCI_INIT, &hdev->flags);
1600
1601 if (!ret) {
1602 hci_dev_hold(hdev);
1603 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1604 hci_adv_instances_set_rpa_expired(hdev, true);
1605 set_bit(HCI_UP, &hdev->flags);
1606 hci_sock_dev_event(hdev, HCI_DEV_UP);
1607 hci_leds_update_powered(hdev, true);
1608 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1609 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1610 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1611 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1612 hci_dev_test_flag(hdev, HCI_MGMT) &&
1613 hdev->dev_type == HCI_PRIMARY) {
1614 ret = __hci_req_hci_power_on(hdev);
1615 mgmt_power_on(hdev, ret);
1616 }
1617 } else {
1618 /* Init failed, cleanup */
1619 flush_work(&hdev->tx_work);
1620
1621 /* Since hci_rx_work() can schedule new cmd_work, it should be
1622 * flushed first to avoid an unexpected call of
1623 * hci_cmd_work()
1624 */
1625 flush_work(&hdev->rx_work);
1626 flush_work(&hdev->cmd_work);
1627
1628 skb_queue_purge(&hdev->cmd_q);
1629 skb_queue_purge(&hdev->rx_q);
1630
1631 if (hdev->flush)
1632 hdev->flush(hdev);
1633
1634 if (hdev->sent_cmd) {
1635 cancel_delayed_work_sync(&hdev->cmd_timer);
1636 kfree_skb(hdev->sent_cmd);
1637 hdev->sent_cmd = NULL;
1638 }
1639
1640 clear_bit(HCI_RUNNING, &hdev->flags);
1641 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1642
1643 hdev->close(hdev);
1644 hdev->flags &= BIT(HCI_RAW);
1645 }
1646
1647 done:
1648 hci_req_sync_unlock(hdev);
1649 return ret;
1650 }
1651
1652 /* ---- HCI ioctl helpers ---- */
1653
1654 int hci_dev_open(__u16 dev)
1655 {
1656 struct hci_dev *hdev;
1657 int err;
1658
1659 hdev = hci_dev_get(dev);
1660 if (!hdev)
1661 return -ENODEV;
1662
1663 /* Devices that are marked as unconfigured can only be powered
1664 * up as user channel. Trying to bring them up as normal devices
1665 * will result in a failure. Only user channel operation is
1666 * possible.
1667 *
1668 * When this function is called for a user channel, the flag
1669 * HCI_USER_CHANNEL will be set first before attempting to
1670 * open the device.
1671 */
1672 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1673 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1674 err = -EOPNOTSUPP;
1675 goto done;
1676 }
1677
1678 /* We need to ensure that no other power on/off work is pending
1679 * before proceeding to call hci_dev_do_open. This is
1680 * particularly important if the setup procedure has not yet
1681 * completed.
1682 */
1683 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1684 cancel_delayed_work(&hdev->power_off);
1685
1686 /* After this call it is guaranteed that the setup procedure
1687 * has finished. This means that error conditions like RFKILL
1688 * or no valid public or static random address apply.
1689 */
1690 flush_workqueue(hdev->req_workqueue);
1691
1692 /* For controllers not using the management interface and that
1693 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1694 * so that pairing works for them. Once the management interface
1695 * is in use this bit will be cleared again and userspace has
1696 * to explicitly enable it.
1697 */
1698 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1699 !hci_dev_test_flag(hdev, HCI_MGMT))
1700 hci_dev_set_flag(hdev, HCI_BONDABLE);
1701
1702 err = hci_dev_do_open(hdev);
1703
1704 done:
1705 hci_dev_put(hdev);
1706 return err;
1707 }
1708
1709 /* This function requires the caller holds hdev->lock */
1710 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1711 {
1712 struct hci_conn_params *p;
1713
1714 list_for_each_entry(p, &hdev->le_conn_params, list) {
1715 if (p->conn) {
1716 hci_conn_drop(p->conn);
1717 hci_conn_put(p->conn);
1718 p->conn = NULL;
1719 }
1720 list_del_init(&p->action);
1721 }
1722
1723 BT_DBG("All LE pending actions cleared");
1724 }
1725
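/* Bring the controller down: run the vendor shutdown callback, flush
 * work and queues, clear discovery and connection state and finally
 * close the transport.
 */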
1726 int hci_dev_do_close(struct hci_dev *hdev)
1727 {
1728 bool auto_off;
1729 int err = 0;
1730
1731 BT_DBG("%s %p", hdev->name, hdev);
1732
1733 cancel_delayed_work(&hdev->power_off);
1734 cancel_delayed_work(&hdev->ncmd_timer);
1735
1736 hci_request_cancel_all(hdev);
1737 hci_req_sync_lock(hdev);
1738
1739 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1740 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1741 test_bit(HCI_UP, &hdev->flags)) {
1742 /* Execute vendor specific shutdown routine */
1743 if (hdev->shutdown)
1744 err = hdev->shutdown(hdev);
1745 }
1746
1747 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1748 cancel_delayed_work_sync(&hdev->cmd_timer);
1749 hci_req_sync_unlock(hdev);
1750 return err;
1751 }
1752
1753 hci_leds_update_powered(hdev, false);
1754
1755 /* Flush RX and TX works */
1756 flush_work(&hdev->tx_work);
1757 flush_work(&hdev->rx_work);
1758
1759 if (hdev->discov_timeout > 0) {
1760 hdev->discov_timeout = 0;
1761 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1762 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1763 }
1764
1765 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1766 cancel_delayed_work(&hdev->service_cache);
1767
1768 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1769 struct adv_info *adv_instance;
1770
1771 cancel_delayed_work_sync(&hdev->rpa_expired);
1772
1773 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1774 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1775 }
1776
1777 /* Avoid potential lockdep warnings from the *_flush() calls by
1778 * ensuring the workqueue is empty up front.
1779 */
1780 drain_workqueue(hdev->workqueue);
1781
1782 hci_dev_lock(hdev);
1783
1784 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1785
1786 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1787
1788 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1789 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1790 hci_dev_test_flag(hdev, HCI_MGMT))
1791 __mgmt_power_off(hdev);
1792
1793 hci_inquiry_cache_flush(hdev);
1794 hci_pend_le_actions_clear(hdev);
1795 hci_conn_hash_flush(hdev);
1796 hci_dev_unlock(hdev);
1797
1798 smp_unregister(hdev);
1799
1800 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1801
1802 aosp_do_close(hdev);
1803 msft_do_close(hdev);
1804
1805 if (hdev->flush)
1806 hdev->flush(hdev);
1807
1808 /* Reset device */
1809 skb_queue_purge(&hdev->cmd_q);
1810 atomic_set(&hdev->cmd_cnt, 1);
1811 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1812 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1813 set_bit(HCI_INIT, &hdev->flags);
1814 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1815 clear_bit(HCI_INIT, &hdev->flags);
1816 }
1817
1818 /* flush cmd work */
1819 flush_work(&hdev->cmd_work);
1820
1821 /* Drop queues */
1822 skb_queue_purge(&hdev->rx_q);
1823 skb_queue_purge(&hdev->cmd_q);
1824 skb_queue_purge(&hdev->raw_q);
1825
1826 /* Drop last sent command */
1827 if (hdev->sent_cmd) {
1828 cancel_delayed_work_sync(&hdev->cmd_timer);
1829 kfree_skb(hdev->sent_cmd);
1830 hdev->sent_cmd = NULL;
1831 }
1832
1833 clear_bit(HCI_RUNNING, &hdev->flags);
1834 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1835
1836 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1837 wake_up(&hdev->suspend_wait_q);
1838
1839 /* After this point our queues are empty
1840 * and no tasks are scheduled. */
1841 hdev->close(hdev);
1842
1843 /* Clear flags */
1844 hdev->flags &= BIT(HCI_RAW);
1845 hci_dev_clear_volatile_flags(hdev);
1846
1847 /* Controller radio is available but is currently powered down */
1848 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1849
1850 memset(hdev->eir, 0, sizeof(hdev->eir));
1851 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1852 bacpy(&hdev->random_addr, BDADDR_ANY);
1853
1854 hci_req_sync_unlock(hdev);
1855
1856 hci_dev_put(hdev);
1857 return err;
1858 }
1859
1860 int hci_dev_close(__u16 dev)
1861 {
1862 struct hci_dev *hdev;
1863 int err;
1864
1865 hdev = hci_dev_get(dev);
1866 if (!hdev)
1867 return -ENODEV;
1868
1869 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1870 err = -EBUSY;
1871 goto done;
1872 }
1873
1874 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1875 cancel_delayed_work(&hdev->power_off);
1876
1877 err = hci_dev_do_close(hdev);
1878
1879 done:
1880 hci_dev_put(hdev);
1881 return err;
1882 }
1883
1884 static int hci_dev_do_reset(struct hci_dev *hdev)
1885 {
1886 int ret;
1887
1888 BT_DBG("%s %p", hdev->name, hdev);
1889
1890 hci_req_sync_lock(hdev);
1891
1892 /* Drop queues */
1893 skb_queue_purge(&hdev->rx_q);
1894 skb_queue_purge(&hdev->cmd_q);
1895
1896 /* Avoid potential lockdep warnings from the *_flush() calls by
1897 * ensuring the workqueue is empty up front.
1898 */
1899 drain_workqueue(hdev->workqueue);
1900
1901 hci_dev_lock(hdev);
1902 hci_inquiry_cache_flush(hdev);
1903 hci_conn_hash_flush(hdev);
1904 hci_dev_unlock(hdev);
1905
1906 if (hdev->flush)
1907 hdev->flush(hdev);
1908
1909 atomic_set(&hdev->cmd_cnt, 1);
1910 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1911
1912 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1913
1914 hci_req_sync_unlock(hdev);
1915 return ret;
1916 }
1917
1918 int hci_dev_reset(__u16 dev)
1919 {
1920 struct hci_dev *hdev;
1921 int err;
1922
1923 hdev = hci_dev_get(dev);
1924 if (!hdev)
1925 return -ENODEV;
1926
1927 if (!test_bit(HCI_UP, &hdev->flags)) {
1928 err = -ENETDOWN;
1929 goto done;
1930 }
1931
1932 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1933 err = -EBUSY;
1934 goto done;
1935 }
1936
1937 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1938 err = -EOPNOTSUPP;
1939 goto done;
1940 }
1941
1942 err = hci_dev_do_reset(hdev);
1943
1944 done:
1945 hci_dev_put(hdev);
1946 return err;
1947 }
1948
1949 int hci_dev_reset_stat(__u16 dev)
1950 {
1951 struct hci_dev *hdev;
1952 int ret = 0;
1953
1954 hdev = hci_dev_get(dev);
1955 if (!hdev)
1956 return -ENODEV;
1957
1958 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1959 ret = -EBUSY;
1960 goto done;
1961 }
1962
1963 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1964 ret = -EOPNOTSUPP;
1965 goto done;
1966 }
1967
1968 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1969
1970 done:
1971 hci_dev_put(hdev);
1972 return ret;
1973 }
1974
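/* Note: scan is the Write Scan Enable bitmask handed in by the HCISETSCAN
 * ioctl handler below, i.e. a combination of SCAN_INQUIRY (0x01) and
 * SCAN_PAGE (0x02) from hci.h; SCAN_DISABLED (0x00) clears both the
 * connectable and the discoverable flag.
 */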
1975 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1976 {
1977 bool conn_changed, discov_changed;
1978
1979 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1980
1981 if ((scan & SCAN_PAGE))
1982 conn_changed = !hci_dev_test_and_set_flag(hdev,
1983 HCI_CONNECTABLE);
1984 else
1985 conn_changed = hci_dev_test_and_clear_flag(hdev,
1986 HCI_CONNECTABLE);
1987
1988 if ((scan & SCAN_INQUIRY)) {
1989 discov_changed = !hci_dev_test_and_set_flag(hdev,
1990 HCI_DISCOVERABLE);
1991 } else {
1992 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1993 discov_changed = hci_dev_test_and_clear_flag(hdev,
1994 HCI_DISCOVERABLE);
1995 }
1996
1997 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1998 return;
1999
2000 if (conn_changed || discov_changed) {
2001 /* In case this was disabled through mgmt */
2002 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2003
2004 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2005 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2006
2007 mgmt_new_settings(hdev);
2008 }
2009 }
2010
2011 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2012 {
2013 struct hci_dev *hdev;
2014 struct hci_dev_req dr;
2015 int err = 0;
2016
2017 if (copy_from_user(&dr, arg, sizeof(dr)))
2018 return -EFAULT;
2019
2020 hdev = hci_dev_get(dr.dev_id);
2021 if (!hdev)
2022 return -ENODEV;
2023
2024 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2025 err = -EBUSY;
2026 goto done;
2027 }
2028
2029 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2030 err = -EOPNOTSUPP;
2031 goto done;
2032 }
2033
2034 if (hdev->dev_type != HCI_PRIMARY) {
2035 err = -EOPNOTSUPP;
2036 goto done;
2037 }
2038
2039 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2040 err = -EOPNOTSUPP;
2041 goto done;
2042 }
2043
2044 switch (cmd) {
2045 case HCISETAUTH:
2046 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2047 HCI_INIT_TIMEOUT, NULL);
2048 break;
2049
2050 case HCISETENCRYPT:
2051 if (!lmp_encrypt_capable(hdev)) {
2052 err = -EOPNOTSUPP;
2053 break;
2054 }
2055
2056 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2057 /* Auth must be enabled first */
2058 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2059 HCI_INIT_TIMEOUT, NULL);
2060 if (err)
2061 break;
2062 }
2063
2064 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2065 HCI_INIT_TIMEOUT, NULL);
2066 break;
2067
2068 case HCISETSCAN:
2069 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2070 HCI_INIT_TIMEOUT, NULL);
2071
2072 /* Ensure that the connectable and discoverable states
2073 * get correctly modified as this was a non-mgmt change.
2074 */
2075 if (!err)
2076 hci_update_scan_state(hdev, dr.dev_opt);
2077 break;
2078
2079 case HCISETLINKPOL:
2080 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2081 HCI_INIT_TIMEOUT, NULL);
2082 break;
2083
2084 case HCISETLINKMODE:
2085 hdev->link_mode = ((__u16) dr.dev_opt) &
2086 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2087 break;
2088
2089 case HCISETPTYPE:
2090 if (hdev->pkt_type == (__u16) dr.dev_opt)
2091 break;
2092
2093 hdev->pkt_type = (__u16) dr.dev_opt;
2094 mgmt_phy_configuration_changed(hdev, NULL);
2095 break;
2096
2097 case HCISETACLMTU:
2098 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2099 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2100 break;
2101
2102 case HCISETSCOMTU:
2103 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2104 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2105 break;
2106
2107 default:
2108 err = -EINVAL;
2109 break;
2110 }
2111
2112 done:
2113 hci_dev_put(hdev);
2114 return err;
2115 }
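/* Illustrative userspace counterpart (not part of this file): the ioctls
 * handled above are issued on a raw HCI socket and need CAP_NET_ADMIN.
 * Enabling page and inquiry scan on hci0 would look roughly like this:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (sk >= 0)
 *		ioctl(sk, HCISETSCAN, (unsigned long)&dr);
 */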
2116
2117 int hci_get_dev_list(void __user *arg)
2118 {
2119 struct hci_dev *hdev;
2120 struct hci_dev_list_req *dl;
2121 struct hci_dev_req *dr;
2122 int n = 0, size, err;
2123 __u16 dev_num;
2124
2125 if (get_user(dev_num, (__u16 __user *) arg))
2126 return -EFAULT;
2127
2128 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2129 return -EINVAL;
2130
2131 size = sizeof(*dl) + dev_num * sizeof(*dr);
2132
2133 dl = kzalloc(size, GFP_KERNEL);
2134 if (!dl)
2135 return -ENOMEM;
2136
2137 dr = dl->dev_req;
2138
2139 read_lock(&hci_dev_list_lock);
2140 list_for_each_entry(hdev, &hci_dev_list, list) {
2141 unsigned long flags = hdev->flags;
2142
2143 		/* When auto-off is configured the transport is still
2144 		 * running, but in that case report the device as being
2145 		 * down.
2146 */
2147 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2148 flags &= ~BIT(HCI_UP);
2149
2150 (dr + n)->dev_id = hdev->id;
2151 (dr + n)->dev_opt = flags;
2152
2153 if (++n >= dev_num)
2154 break;
2155 }
2156 read_unlock(&hci_dev_list_lock);
2157
2158 dl->dev_num = n;
2159 size = sizeof(*dl) + n * sizeof(*dr);
2160
2161 err = copy_to_user(arg, dl, size);
2162 kfree(dl);
2163
2164 return err ? -EFAULT : 0;
2165 }
2166
2167 int hci_get_dev_info(void __user *arg)
2168 {
2169 struct hci_dev *hdev;
2170 struct hci_dev_info di;
2171 unsigned long flags;
2172 int err = 0;
2173
2174 if (copy_from_user(&di, arg, sizeof(di)))
2175 return -EFAULT;
2176
2177 hdev = hci_dev_get(di.dev_id);
2178 if (!hdev)
2179 return -ENODEV;
2180
2181 	/* When auto-off is configured the transport is still
2182 	 * running, but in that case report the device as being
2183 	 * down.
2184 */
2185 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2186 flags = hdev->flags & ~BIT(HCI_UP);
2187 else
2188 flags = hdev->flags;
2189
2190 strcpy(di.name, hdev->name);
2191 di.bdaddr = hdev->bdaddr;
2192 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2193 di.flags = flags;
2194 di.pkt_type = hdev->pkt_type;
2195 if (lmp_bredr_capable(hdev)) {
2196 di.acl_mtu = hdev->acl_mtu;
2197 di.acl_pkts = hdev->acl_pkts;
2198 di.sco_mtu = hdev->sco_mtu;
2199 di.sco_pkts = hdev->sco_pkts;
2200 } else {
2201 di.acl_mtu = hdev->le_mtu;
2202 di.acl_pkts = hdev->le_pkts;
2203 di.sco_mtu = 0;
2204 di.sco_pkts = 0;
2205 }
2206 di.link_policy = hdev->link_policy;
2207 di.link_mode = hdev->link_mode;
2208
2209 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2210 memcpy(&di.features, &hdev->features, sizeof(di.features));
2211
2212 if (copy_to_user(arg, &di, sizeof(di)))
2213 err = -EFAULT;
2214
2215 hci_dev_put(hdev);
2216
2217 return err;
2218 }
2219
2220 /* ---- Interface to HCI drivers ---- */
2221
2222 static int hci_rfkill_set_block(void *data, bool blocked)
2223 {
2224 struct hci_dev *hdev = data;
2225
2226 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2227
2228 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2229 return -EBUSY;
2230
2231 if (blocked) {
2232 hci_dev_set_flag(hdev, HCI_RFKILLED);
2233 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2234 !hci_dev_test_flag(hdev, HCI_CONFIG))
2235 hci_dev_do_close(hdev);
2236 } else {
2237 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2238 }
2239
2240 return 0;
2241 }
2242
2243 static const struct rfkill_ops hci_rfkill_ops = {
2244 .set_block = hci_rfkill_set_block,
2245 };
2246
2247 static void hci_power_on(struct work_struct *work)
2248 {
2249 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2250 int err;
2251
2252 BT_DBG("%s", hdev->name);
2253
2254 if (test_bit(HCI_UP, &hdev->flags) &&
2255 hci_dev_test_flag(hdev, HCI_MGMT) &&
2256 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2257 cancel_delayed_work(&hdev->power_off);
2258 hci_req_sync_lock(hdev);
2259 err = __hci_req_hci_power_on(hdev);
2260 hci_req_sync_unlock(hdev);
2261 mgmt_power_on(hdev, err);
2262 return;
2263 }
2264
2265 err = hci_dev_do_open(hdev);
2266 if (err < 0) {
2267 hci_dev_lock(hdev);
2268 mgmt_set_powered_failed(hdev, err);
2269 hci_dev_unlock(hdev);
2270 return;
2271 }
2272
2273 /* During the HCI setup phase, a few error conditions are
2274 * ignored and they need to be checked now. If they are still
2275 * valid, it is important to turn the device back off.
2276 */
2277 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2278 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2279 (hdev->dev_type == HCI_PRIMARY &&
2280 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2281 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2282 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2283 hci_dev_do_close(hdev);
2284 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2285 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2286 HCI_AUTO_OFF_TIMEOUT);
2287 }
2288
2289 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2290 /* For unconfigured devices, set the HCI_RAW flag
2291 * so that userspace can easily identify them.
2292 */
2293 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2294 set_bit(HCI_RAW, &hdev->flags);
2295
2296 /* For fully configured devices, this will send
2297 * the Index Added event. For unconfigured devices,
2298 		 * it will send the Unconfigured Index Added event.
2299 *
2300 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2301 		 * and no event will be sent.
2302 */
2303 mgmt_index_added(hdev);
2304 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2305 		/* Now that the controller is configured, it is
2306 		 * important to clear the HCI_RAW flag.
2307 */
2308 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2309 clear_bit(HCI_RAW, &hdev->flags);
2310
2311 /* Powering on the controller with HCI_CONFIG set only
2312 * happens with the transition from unconfigured to
2313 * configured. This will send the Index Added event.
2314 */
2315 mgmt_index_added(hdev);
2316 }
2317 }
2318
2319 static void hci_power_off(struct work_struct *work)
2320 {
2321 struct hci_dev *hdev = container_of(work, struct hci_dev,
2322 power_off.work);
2323
2324 BT_DBG("%s", hdev->name);
2325
2326 hci_dev_do_close(hdev);
2327 }
2328
2329 static void hci_error_reset(struct work_struct *work)
2330 {
2331 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2332
2333 hci_dev_hold(hdev);
2334 BT_DBG("%s", hdev->name);
2335
2336 if (hdev->hw_error)
2337 hdev->hw_error(hdev, hdev->hw_error_code);
2338 else
2339 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2340
2341 if (!hci_dev_do_close(hdev))
2342 hci_dev_do_open(hdev);
2343
2344 hci_dev_put(hdev);
2345 }
2346
2347 void hci_uuids_clear(struct hci_dev *hdev)
2348 {
2349 struct bt_uuid *uuid, *tmp;
2350
2351 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2352 list_del(&uuid->list);
2353 kfree(uuid);
2354 }
2355 }
2356
2357 void hci_link_keys_clear(struct hci_dev *hdev)
2358 {
2359 struct link_key *key, *tmp;
2360
2361 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2362 list_del_rcu(&key->list);
2363 kfree_rcu(key, rcu);
2364 }
2365 }
2366
2367 void hci_smp_ltks_clear(struct hci_dev *hdev)
2368 {
2369 struct smp_ltk *k, *tmp;
2370
2371 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2372 list_del_rcu(&k->list);
2373 kfree_rcu(k, rcu);
2374 }
2375 }
2376
2377 void hci_smp_irks_clear(struct hci_dev *hdev)
2378 {
2379 struct smp_irk *k, *tmp;
2380
2381 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2382 list_del_rcu(&k->list);
2383 kfree_rcu(k, rcu);
2384 }
2385 }
2386
2387 void hci_blocked_keys_clear(struct hci_dev *hdev)
2388 {
2389 struct blocked_key *b, *tmp;
2390
2391 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
2392 list_del_rcu(&b->list);
2393 kfree_rcu(b, rcu);
2394 }
2395 }
2396
2397 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2398 {
2399 bool blocked = false;
2400 struct blocked_key *b;
2401
2402 rcu_read_lock();
2403 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2404 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2405 blocked = true;
2406 break;
2407 }
2408 }
2409
2410 rcu_read_unlock();
2411 return blocked;
2412 }
2413
2414 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2415 {
2416 struct link_key *k;
2417
2418 rcu_read_lock();
2419 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2420 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2421 rcu_read_unlock();
2422
2423 if (hci_is_blocked_key(hdev,
2424 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2425 k->val)) {
2426 bt_dev_warn_ratelimited(hdev,
2427 "Link key blocked for %pMR",
2428 &k->bdaddr);
2429 return NULL;
2430 }
2431
2432 return k;
2433 }
2434 }
2435 rcu_read_unlock();
2436
2437 return NULL;
2438 }
2439
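/* Decide whether a new BR/EDR link key is worth storing persistently.
 * The key_type values tested below are the HCI_LK_* constants from hci.h:
 * everything below 0x03 (combination, local unit and remote unit keys)
 * stems from legacy pairing, 0x03 is the debug combination key and 0x06
 * the changed combination key.
 */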
2440 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2441 u8 key_type, u8 old_key_type)
2442 {
2443 /* Legacy key */
2444 if (key_type < 0x03)
2445 return true;
2446
2447 /* Debug keys are insecure so don't store them persistently */
2448 if (key_type == HCI_LK_DEBUG_COMBINATION)
2449 return false;
2450
2451 /* Changed combination key and there's no previous one */
2452 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2453 return false;
2454
2455 /* Security mode 3 case */
2456 if (!conn)
2457 return true;
2458
2459 /* BR/EDR key derived using SC from an LE link */
2460 if (conn->type == LE_LINK)
2461 return true;
2462
2463 /* Neither local nor remote side had no-bonding as requirement */
2464 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2465 return true;
2466
2467 /* Local side had dedicated bonding as requirement */
2468 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2469 return true;
2470
2471 /* Remote side had dedicated bonding as requirement */
2472 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2473 return true;
2474
2475 /* If none of the above criteria match, then don't store the key
2476 * persistently */
2477 return false;
2478 }
2479
2480 static u8 ltk_role(u8 type)
2481 {
2482 if (type == SMP_LTK)
2483 return HCI_ROLE_MASTER;
2484
2485 return HCI_ROLE_SLAVE;
2486 }
2487
2488 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2489 u8 addr_type, u8 role)
2490 {
2491 struct smp_ltk *k;
2492
2493 rcu_read_lock();
2494 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2495 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2496 continue;
2497
2498 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2499 rcu_read_unlock();
2500
2501 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2502 k->val)) {
2503 bt_dev_warn_ratelimited(hdev,
2504 "LTK blocked for %pMR",
2505 &k->bdaddr);
2506 return NULL;
2507 }
2508
2509 return k;
2510 }
2511 }
2512 rcu_read_unlock();
2513
2514 return NULL;
2515 }
2516
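/* Resolve a Resolvable Private Address to a stored IRK. The lookup runs in
 * two passes: first against RPAs that were already resolved and cached in
 * irk->rpa, then by checking smp_irk_matches() against every known IRK; on
 * a match the freshly resolved RPA is cached for the next lookup.
 */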
2517 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2518 {
2519 struct smp_irk *irk_to_return = NULL;
2520 struct smp_irk *irk;
2521
2522 rcu_read_lock();
2523 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2524 if (!bacmp(&irk->rpa, rpa)) {
2525 irk_to_return = irk;
2526 goto done;
2527 }
2528 }
2529
2530 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2531 if (smp_irk_matches(hdev, irk->val, rpa)) {
2532 bacpy(&irk->rpa, rpa);
2533 irk_to_return = irk;
2534 goto done;
2535 }
2536 }
2537
2538 done:
2539 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2540 irk_to_return->val)) {
2541 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2542 &irk_to_return->bdaddr);
2543 irk_to_return = NULL;
2544 }
2545
2546 rcu_read_unlock();
2547
2548 return irk_to_return;
2549 }
2550
2551 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2552 u8 addr_type)
2553 {
2554 struct smp_irk *irk_to_return = NULL;
2555 struct smp_irk *irk;
2556
2557 /* Identity Address must be public or static random */
2558 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2559 return NULL;
2560
2561 rcu_read_lock();
2562 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2563 if (addr_type == irk->addr_type &&
2564 bacmp(bdaddr, &irk->bdaddr) == 0) {
2565 irk_to_return = irk;
2566 goto done;
2567 }
2568 }
2569
2570 done:
2571
2572 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2573 irk_to_return->val)) {
2574 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2575 &irk_to_return->bdaddr);
2576 irk_to_return = NULL;
2577 }
2578
2579 rcu_read_unlock();
2580
2581 return irk_to_return;
2582 }
2583
2584 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2585 bdaddr_t *bdaddr, u8 *val, u8 type,
2586 u8 pin_len, bool *persistent)
2587 {
2588 struct link_key *key, *old_key;
2589 u8 old_key_type;
2590
2591 old_key = hci_find_link_key(hdev, bdaddr);
2592 if (old_key) {
2593 old_key_type = old_key->type;
2594 key = old_key;
2595 } else {
2596 old_key_type = conn ? conn->key_type : 0xff;
2597 key = kzalloc(sizeof(*key), GFP_KERNEL);
2598 if (!key)
2599 return NULL;
2600 list_add_rcu(&key->list, &hdev->link_keys);
2601 }
2602
2603 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2604
2605 /* Some buggy controller combinations generate a changed
2606 * combination key for legacy pairing even when there's no
2607 * previous key */
2608 if (type == HCI_LK_CHANGED_COMBINATION &&
2609 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2610 type = HCI_LK_COMBINATION;
2611 if (conn)
2612 conn->key_type = type;
2613 }
2614
2615 bacpy(&key->bdaddr, bdaddr);
2616 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2617 key->pin_len = pin_len;
2618
2619 if (type == HCI_LK_CHANGED_COMBINATION)
2620 key->type = old_key_type;
2621 else
2622 key->type = type;
2623
2624 if (persistent)
2625 *persistent = hci_persistent_key(hdev, conn, type,
2626 old_key_type);
2627
2628 return key;
2629 }
2630
2631 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2632 u8 addr_type, u8 type, u8 authenticated,
2633 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2634 {
2635 struct smp_ltk *key, *old_key;
2636 u8 role = ltk_role(type);
2637
2638 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2639 if (old_key)
2640 key = old_key;
2641 else {
2642 key = kzalloc(sizeof(*key), GFP_KERNEL);
2643 if (!key)
2644 return NULL;
2645 list_add_rcu(&key->list, &hdev->long_term_keys);
2646 }
2647
2648 bacpy(&key->bdaddr, bdaddr);
2649 key->bdaddr_type = addr_type;
2650 memcpy(key->val, tk, sizeof(key->val));
2651 key->authenticated = authenticated;
2652 key->ediv = ediv;
2653 key->rand = rand;
2654 key->enc_size = enc_size;
2655 key->type = type;
2656
2657 return key;
2658 }
2659
2660 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2662 {
2663 struct smp_irk *irk;
2664
2665 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2666 if (!irk) {
2667 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2668 if (!irk)
2669 return NULL;
2670
2671 bacpy(&irk->bdaddr, bdaddr);
2672 irk->addr_type = addr_type;
2673
2674 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2675 }
2676
2677 memcpy(irk->val, val, 16);
2678 bacpy(&irk->rpa, rpa);
2679
2680 return irk;
2681 }
2682
2683 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2684 {
2685 struct link_key *key;
2686
2687 key = hci_find_link_key(hdev, bdaddr);
2688 if (!key)
2689 return -ENOENT;
2690
2691 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2692
2693 list_del_rcu(&key->list);
2694 kfree_rcu(key, rcu);
2695
2696 return 0;
2697 }
2698
2699 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2700 {
2701 struct smp_ltk *k, *tmp;
2702 int removed = 0;
2703
2704 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2705 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2706 continue;
2707
2708 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2709
2710 list_del_rcu(&k->list);
2711 kfree_rcu(k, rcu);
2712 removed++;
2713 }
2714
2715 return removed ? 0 : -ENOENT;
2716 }
2717
2718 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2719 {
2720 struct smp_irk *k, *tmp;
2721
2722 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2723 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2724 continue;
2725
2726 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2727
2728 list_del_rcu(&k->list);
2729 kfree_rcu(k, rcu);
2730 }
2731 }
2732
2733 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2734 {
2735 struct smp_ltk *k;
2736 struct smp_irk *irk;
2737 u8 addr_type;
2738
2739 if (type == BDADDR_BREDR) {
2740 if (hci_find_link_key(hdev, bdaddr))
2741 return true;
2742 return false;
2743 }
2744
2745 /* Convert to HCI addr type which struct smp_ltk uses */
2746 if (type == BDADDR_LE_PUBLIC)
2747 addr_type = ADDR_LE_DEV_PUBLIC;
2748 else
2749 addr_type = ADDR_LE_DEV_RANDOM;
2750
2751 irk = hci_get_irk(hdev, bdaddr, addr_type);
2752 if (irk) {
2753 bdaddr = &irk->bdaddr;
2754 addr_type = irk->addr_type;
2755 }
2756
2757 rcu_read_lock();
2758 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2759 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2760 rcu_read_unlock();
2761 return true;
2762 }
2763 }
2764 rcu_read_unlock();
2765
2766 return false;
2767 }
2768
2769 /* HCI command timer function */
2770 static void hci_cmd_timeout(struct work_struct *work)
2771 {
2772 struct hci_dev *hdev = container_of(work, struct hci_dev,
2773 cmd_timer.work);
2774
2775 if (hdev->sent_cmd) {
2776 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2777 u16 opcode = __le16_to_cpu(sent->opcode);
2778
2779 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2780 } else {
2781 bt_dev_err(hdev, "command tx timeout");
2782 }
2783
2784 if (hdev->cmd_timeout)
2785 hdev->cmd_timeout(hdev);
2786
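	/* Force the command credit count back to one so hci_cmd_work() can
	 * push the next queued command even though no Command Complete or
	 * Command Status event arrived for the timed out one.
	 */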
2787 atomic_set(&hdev->cmd_cnt, 1);
2788 queue_work(hdev->workqueue, &hdev->cmd_work);
2789 }
2790
2791 /* HCI ncmd timer function */
2792 static void hci_ncmd_timeout(struct work_struct *work)
2793 {
2794 struct hci_dev *hdev = container_of(work, struct hci_dev,
2795 ncmd_timer.work);
2796
2797 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2798
2799 /* During HCI_INIT phase no events can be injected if the ncmd timer
2800 * triggers since the procedure has its own timeout handling.
2801 */
2802 if (test_bit(HCI_INIT, &hdev->flags))
2803 return;
2804
2805 /* This is an irrecoverable state, inject hardware error event */
2806 hci_reset_dev(hdev);
2807 }
2808
2809 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2810 bdaddr_t *bdaddr, u8 bdaddr_type)
2811 {
2812 struct oob_data *data;
2813
2814 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2815 if (bacmp(bdaddr, &data->bdaddr) != 0)
2816 continue;
2817 if (data->bdaddr_type != bdaddr_type)
2818 continue;
2819 return data;
2820 }
2821
2822 return NULL;
2823 }
2824
2825 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2826 u8 bdaddr_type)
2827 {
2828 struct oob_data *data;
2829
2830 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2831 if (!data)
2832 return -ENOENT;
2833
2834 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2835
2836 list_del(&data->list);
2837 kfree(data);
2838
2839 return 0;
2840 }
2841
2842 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2843 {
2844 struct oob_data *data, *n;
2845
2846 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2847 list_del(&data->list);
2848 kfree(data);
2849 }
2850 }
2851
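/* Store (or update) remote OOB pairing data for bdaddr. data->present is a
 * bitmask describing which value pairs are valid: 0x01 means P-192 values
 * only, 0x02 means P-256 values only and 0x03 means both sets were given.
 */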
2852 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2853 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2854 u8 *hash256, u8 *rand256)
2855 {
2856 struct oob_data *data;
2857
2858 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2859 if (!data) {
2860 data = kmalloc(sizeof(*data), GFP_KERNEL);
2861 if (!data)
2862 return -ENOMEM;
2863
2864 bacpy(&data->bdaddr, bdaddr);
2865 data->bdaddr_type = bdaddr_type;
2866 list_add(&data->list, &hdev->remote_oob_data);
2867 }
2868
2869 if (hash192 && rand192) {
2870 memcpy(data->hash192, hash192, sizeof(data->hash192));
2871 memcpy(data->rand192, rand192, sizeof(data->rand192));
2872 if (hash256 && rand256)
2873 data->present = 0x03;
2874 } else {
2875 memset(data->hash192, 0, sizeof(data->hash192));
2876 memset(data->rand192, 0, sizeof(data->rand192));
2877 if (hash256 && rand256)
2878 data->present = 0x02;
2879 else
2880 data->present = 0x00;
2881 }
2882
2883 if (hash256 && rand256) {
2884 memcpy(data->hash256, hash256, sizeof(data->hash256));
2885 memcpy(data->rand256, rand256, sizeof(data->rand256));
2886 } else {
2887 memset(data->hash256, 0, sizeof(data->hash256));
2888 memset(data->rand256, 0, sizeof(data->rand256));
2889 if (hash192 && rand192)
2890 data->present = 0x01;
2891 }
2892
2893 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2894
2895 return 0;
2896 }
2897
2898 /* This function requires the caller holds hdev->lock */
2899 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2900 {
2901 struct adv_info *adv_instance;
2902
2903 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2904 if (adv_instance->instance == instance)
2905 return adv_instance;
2906 }
2907
2908 return NULL;
2909 }
2910
2911 /* This function requires the caller holds hdev->lock */
2912 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2913 {
2914 struct adv_info *cur_instance;
2915
2916 cur_instance = hci_find_adv_instance(hdev, instance);
2917 if (!cur_instance)
2918 return NULL;
2919
2920 if (cur_instance == list_last_entry(&hdev->adv_instances,
2921 struct adv_info, list))
2922 return list_first_entry(&hdev->adv_instances,
2923 struct adv_info, list);
2924 else
2925 return list_next_entry(cur_instance, list);
2926 }
2927
2928 /* This function requires the caller holds hdev->lock */
2929 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2930 {
2931 struct adv_info *adv_instance;
2932
2933 adv_instance = hci_find_adv_instance(hdev, instance);
2934 if (!adv_instance)
2935 return -ENOENT;
2936
2937 	BT_DBG("%s removing instance %d", hdev->name, instance);
2938
2939 if (hdev->cur_adv_instance == instance) {
2940 if (hdev->adv_instance_timeout) {
2941 cancel_delayed_work(&hdev->adv_instance_expire);
2942 hdev->adv_instance_timeout = 0;
2943 }
2944 hdev->cur_adv_instance = 0x00;
2945 }
2946
2947 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2948
2949 list_del(&adv_instance->list);
2950 kfree(adv_instance);
2951
2952 hdev->adv_instance_cnt--;
2953
2954 return 0;
2955 }
2956
2957 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2958 {
2959 struct adv_info *adv_instance, *n;
2960
2961 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2962 adv_instance->rpa_expired = rpa_expired;
2963 }
2964
2965 /* This function requires the caller holds hdev->lock */
2966 void hci_adv_instances_clear(struct hci_dev *hdev)
2967 {
2968 struct adv_info *adv_instance, *n;
2969
2970 if (hdev->adv_instance_timeout) {
2971 cancel_delayed_work(&hdev->adv_instance_expire);
2972 hdev->adv_instance_timeout = 0;
2973 }
2974
2975 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2976 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2977 list_del(&adv_instance->list);
2978 kfree(adv_instance);
2979 }
2980
2981 hdev->adv_instance_cnt = 0;
2982 hdev->cur_adv_instance = 0x00;
2983 }
2984
2985 static void adv_instance_rpa_expired(struct work_struct *work)
2986 {
2987 struct adv_info *adv_instance = container_of(work, struct adv_info,
2988 rpa_expired_cb.work);
2989
2990 BT_DBG("");
2991
2992 adv_instance->rpa_expired = true;
2993 }
2994
2995 /* This function requires the caller holds hdev->lock */
2996 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2997 u16 adv_data_len, u8 *adv_data,
2998 u16 scan_rsp_len, u8 *scan_rsp_data,
2999 u16 timeout, u16 duration, s8 tx_power,
3000 u32 min_interval, u32 max_interval)
3001 {
3002 struct adv_info *adv_instance;
3003
3004 adv_instance = hci_find_adv_instance(hdev, instance);
3005 if (adv_instance) {
3006 memset(adv_instance->adv_data, 0,
3007 sizeof(adv_instance->adv_data));
3008 memset(adv_instance->scan_rsp_data, 0,
3009 sizeof(adv_instance->scan_rsp_data));
3010 } else {
3011 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3012 instance < 1 || instance > hdev->le_num_of_adv_sets)
3013 return -EOVERFLOW;
3014
3015 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3016 if (!adv_instance)
3017 return -ENOMEM;
3018
3019 adv_instance->pending = true;
3020 adv_instance->instance = instance;
3021 list_add(&adv_instance->list, &hdev->adv_instances);
3022 hdev->adv_instance_cnt++;
3023 }
3024
3025 adv_instance->flags = flags;
3026 adv_instance->adv_data_len = adv_data_len;
3027 adv_instance->scan_rsp_len = scan_rsp_len;
3028 adv_instance->min_interval = min_interval;
3029 adv_instance->max_interval = max_interval;
3030 adv_instance->tx_power = tx_power;
3031
3032 if (adv_data_len)
3033 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3034
3035 if (scan_rsp_len)
3036 memcpy(adv_instance->scan_rsp_data,
3037 scan_rsp_data, scan_rsp_len);
3038
3039 adv_instance->timeout = timeout;
3040 adv_instance->remaining_time = timeout;
3041
3042 if (duration == 0)
3043 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3044 else
3045 adv_instance->duration = duration;
3046
3047 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3048 adv_instance_rpa_expired);
3049
3050 	BT_DBG("%s for instance %d", hdev->name, instance);
3051
3052 return 0;
3053 }
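/* Illustrative caller sketch for hci_add_adv_instance() above (hypothetical
 * values, hdev->lock held by the caller):
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// Flags AD structure
 *
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
 *			     0, 0, 0x7f,	// no timeout, default duration,
 *						// no tx power preference
 *			     hdev->le_adv_min_interval,
 *			     hdev->le_adv_max_interval);
 */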
3054
3055 /* This function requires the caller holds hdev->lock */
3056 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3057 u16 adv_data_len, u8 *adv_data,
3058 u16 scan_rsp_len, u8 *scan_rsp_data)
3059 {
3060 struct adv_info *adv_instance;
3061
3062 adv_instance = hci_find_adv_instance(hdev, instance);
3063
3064 /* If advertisement doesn't exist, we can't modify its data */
3065 if (!adv_instance)
3066 return -ENOENT;
3067
3068 if (adv_data_len) {
3069 memset(adv_instance->adv_data, 0,
3070 sizeof(adv_instance->adv_data));
3071 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3072 adv_instance->adv_data_len = adv_data_len;
3073 }
3074
3075 if (scan_rsp_len) {
3076 memset(adv_instance->scan_rsp_data, 0,
3077 sizeof(adv_instance->scan_rsp_data));
3078 memcpy(adv_instance->scan_rsp_data,
3079 scan_rsp_data, scan_rsp_len);
3080 adv_instance->scan_rsp_len = scan_rsp_len;
3081 }
3082
3083 return 0;
3084 }
3085
3086 /* This function requires the caller holds hdev->lock */
3087 void hci_adv_monitors_clear(struct hci_dev *hdev)
3088 {
3089 struct adv_monitor *monitor;
3090 int handle;
3091
3092 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3093 hci_free_adv_monitor(hdev, monitor);
3094
3095 idr_destroy(&hdev->adv_monitors_idr);
3096 }
3097
3098 /* Frees the monitor structure and does some bookkeeping.
3099 * This function requires the caller holds hdev->lock.
3100 */
3101 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3102 {
3103 struct adv_pattern *pattern;
3104 struct adv_pattern *tmp;
3105
3106 if (!monitor)
3107 return;
3108
3109 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3110 list_del(&pattern->list);
3111 kfree(pattern);
3112 }
3113
3114 if (monitor->handle)
3115 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3116
3117 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3118 hdev->adv_monitors_cnt--;
3119 mgmt_adv_monitor_removed(hdev, monitor->handle);
3120 }
3121
3122 kfree(monitor);
3123 }
3124
3125 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3126 {
3127 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3128 }
3129
3130 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3131 {
3132 return mgmt_remove_adv_monitor_complete(hdev, status);
3133 }
3134
3135 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3136 * also attempts to forward the request to the controller.
3137 * Returns true if request is forwarded (result is pending), false otherwise.
3138 * This function requires the caller holds hdev->lock.
3139 */
3140 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3141 int *err)
3142 {
3143 int min, max, handle;
3144
3145 *err = 0;
3146
3147 if (!monitor) {
3148 *err = -EINVAL;
3149 return false;
3150 }
3151
3152 min = HCI_MIN_ADV_MONITOR_HANDLE;
3153 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3154 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3155 GFP_KERNEL);
3156 if (handle < 0) {
3157 *err = handle;
3158 return false;
3159 }
3160
3161 monitor->handle = handle;
3162
3163 if (!hdev_is_powered(hdev))
3164 return false;
3165
3166 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3167 case HCI_ADV_MONITOR_EXT_NONE:
3168 hci_update_background_scan(hdev);
3169 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3170 /* Message was not forwarded to controller - not an error */
3171 return false;
3172 case HCI_ADV_MONITOR_EXT_MSFT:
3173 *err = msft_add_monitor_pattern(hdev, monitor);
3174 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3175 *err);
3176 break;
3177 }
3178
3179 return (*err == 0);
3180 }
3181
3182 /* Attempts to tell the controller and free the monitor. If somehow the
3183 * controller doesn't have a corresponding handle, remove anyway.
3184 * Returns true if request is forwarded (result is pending), false otherwise.
3185 * This function requires the caller holds hdev->lock.
3186 */
3187 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3188 struct adv_monitor *monitor,
3189 u16 handle, int *err)
3190 {
3191 *err = 0;
3192
3193 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3194 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3195 goto free_monitor;
3196 case HCI_ADV_MONITOR_EXT_MSFT:
3197 *err = msft_remove_monitor(hdev, monitor, handle);
3198 break;
3199 }
3200
3201 /* In case no matching handle registered, just free the monitor */
3202 if (*err == -ENOENT)
3203 goto free_monitor;
3204
3205 return (*err == 0);
3206
3207 free_monitor:
3208 if (*err == -ENOENT)
3209 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3210 monitor->handle);
3211 hci_free_adv_monitor(hdev, monitor);
3212
3213 *err = 0;
3214 return false;
3215 }
3216
3217 /* Returns true if request is forwarded (result is pending), false otherwise.
3218 * This function requires the caller holds hdev->lock.
3219 */
3220 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3221 {
3222 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3223 bool pending;
3224
3225 if (!monitor) {
3226 *err = -EINVAL;
3227 return false;
3228 }
3229
3230 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3231 if (!*err && !pending)
3232 hci_update_background_scan(hdev);
3233
3234 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3235 hdev->name, handle, *err, pending ? "" : "not ");
3236
3237 return pending;
3238 }
3239
3240 /* Returns true if request is forwarded (result is pending), false otherwise.
3241 * This function requires the caller holds hdev->lock.
3242 */
3243 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3244 {
3245 struct adv_monitor *monitor;
3246 int idr_next_id = 0;
3247 bool pending = false;
3248 bool update = false;
3249
3250 *err = 0;
3251
3252 while (!*err && !pending) {
3253 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3254 if (!monitor)
3255 break;
3256
3257 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3258
3259 if (!*err && !pending)
3260 update = true;
3261 }
3262
3263 if (update)
3264 hci_update_background_scan(hdev);
3265
3266 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3267 hdev->name, *err, pending ? "" : "not ");
3268
3269 return pending;
3270 }
3271
3272 /* This function requires the caller holds hdev->lock */
3273 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3274 {
3275 return !idr_is_empty(&hdev->adv_monitors_idr);
3276 }
3277
3278 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3279 {
3280 if (msft_monitor_supported(hdev))
3281 return HCI_ADV_MONITOR_EXT_MSFT;
3282
3283 return HCI_ADV_MONITOR_EXT_NONE;
3284 }
3285
3286 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3287 bdaddr_t *bdaddr, u8 type)
3288 {
3289 struct bdaddr_list *b;
3290
3291 list_for_each_entry(b, bdaddr_list, list) {
3292 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3293 return b;
3294 }
3295
3296 return NULL;
3297 }
3298
3299 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3300 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3301 u8 type)
3302 {
3303 struct bdaddr_list_with_irk *b;
3304
3305 list_for_each_entry(b, bdaddr_list, list) {
3306 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3307 return b;
3308 }
3309
3310 return NULL;
3311 }
3312
3313 struct bdaddr_list_with_flags *
3314 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3315 bdaddr_t *bdaddr, u8 type)
3316 {
3317 struct bdaddr_list_with_flags *b;
3318
3319 list_for_each_entry(b, bdaddr_list, list) {
3320 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3321 return b;
3322 }
3323
3324 return NULL;
3325 }
3326
3327 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3328 {
3329 struct bdaddr_list *b, *n;
3330
3331 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3332 list_del(&b->list);
3333 kfree(b);
3334 }
3335 }
3336
3337 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3338 {
3339 struct bdaddr_list *entry;
3340
3341 if (!bacmp(bdaddr, BDADDR_ANY))
3342 return -EBADF;
3343
3344 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3345 return -EEXIST;
3346
3347 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3348 if (!entry)
3349 return -ENOMEM;
3350
3351 bacpy(&entry->bdaddr, bdaddr);
3352 entry->bdaddr_type = type;
3353
3354 list_add(&entry->list, list);
3355
3356 return 0;
3357 }
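/* Typical use of the helper above (illustrative): adding a peer with a
 * public address to the controller's LE accept list, e.g.
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 */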
3358
3359 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3360 u8 type, u8 *peer_irk, u8 *local_irk)
3361 {
3362 struct bdaddr_list_with_irk *entry;
3363
3364 if (!bacmp(bdaddr, BDADDR_ANY))
3365 return -EBADF;
3366
3367 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3368 return -EEXIST;
3369
3370 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3371 if (!entry)
3372 return -ENOMEM;
3373
3374 bacpy(&entry->bdaddr, bdaddr);
3375 entry->bdaddr_type = type;
3376
3377 if (peer_irk)
3378 memcpy(entry->peer_irk, peer_irk, 16);
3379
3380 if (local_irk)
3381 memcpy(entry->local_irk, local_irk, 16);
3382
3383 list_add(&entry->list, list);
3384
3385 return 0;
3386 }
3387
3388 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3389 u8 type, u32 flags)
3390 {
3391 struct bdaddr_list_with_flags *entry;
3392
3393 if (!bacmp(bdaddr, BDADDR_ANY))
3394 return -EBADF;
3395
3396 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3397 return -EEXIST;
3398
3399 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3400 if (!entry)
3401 return -ENOMEM;
3402
3403 bacpy(&entry->bdaddr, bdaddr);
3404 entry->bdaddr_type = type;
3405 entry->current_flags = flags;
3406
3407 list_add(&entry->list, list);
3408
3409 return 0;
3410 }
3411
3412 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3413 {
3414 struct bdaddr_list *entry;
3415
3416 if (!bacmp(bdaddr, BDADDR_ANY)) {
3417 hci_bdaddr_list_clear(list);
3418 return 0;
3419 }
3420
3421 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3422 if (!entry)
3423 return -ENOENT;
3424
3425 list_del(&entry->list);
3426 kfree(entry);
3427
3428 return 0;
3429 }
3430
3431 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3432 u8 type)
3433 {
3434 struct bdaddr_list_with_irk *entry;
3435
3436 if (!bacmp(bdaddr, BDADDR_ANY)) {
3437 hci_bdaddr_list_clear(list);
3438 return 0;
3439 }
3440
3441 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3442 if (!entry)
3443 return -ENOENT;
3444
3445 list_del(&entry->list);
3446 kfree(entry);
3447
3448 return 0;
3449 }
3450
3451 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3452 u8 type)
3453 {
3454 struct bdaddr_list_with_flags *entry;
3455
3456 if (!bacmp(bdaddr, BDADDR_ANY)) {
3457 hci_bdaddr_list_clear(list);
3458 return 0;
3459 }
3460
3461 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3462 if (!entry)
3463 return -ENOENT;
3464
3465 list_del(&entry->list);
3466 kfree(entry);
3467
3468 return 0;
3469 }
3470
3471 /* This function requires the caller holds hdev->lock */
3472 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3473 bdaddr_t *addr, u8 addr_type)
3474 {
3475 struct hci_conn_params *params;
3476
3477 list_for_each_entry(params, &hdev->le_conn_params, list) {
3478 		if (bacmp(&params->addr, addr) == 0 &&
3479 params->addr_type == addr_type) {
3480 return params;
3481 }
3482 }
3483
3484 return NULL;
3485 }
3486
3487 /* This function requires the caller holds hdev->lock */
3488 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3489 bdaddr_t *addr, u8 addr_type)
3490 {
3491 struct hci_conn_params *param;
3492
3493 switch (addr_type) {
3494 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3495 addr_type = ADDR_LE_DEV_PUBLIC;
3496 break;
3497 case ADDR_LE_DEV_RANDOM_RESOLVED:
3498 addr_type = ADDR_LE_DEV_RANDOM;
3499 break;
3500 }
3501
3502 list_for_each_entry(param, list, action) {
3503 		if (bacmp(&param->addr, addr) == 0 &&
3504 param->addr_type == addr_type)
3505 return param;
3506 }
3507
3508 return NULL;
3509 }
3510
3511 /* This function requires the caller holds hdev->lock */
3512 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3513 bdaddr_t *addr, u8 addr_type)
3514 {
3515 struct hci_conn_params *params;
3516
3517 params = hci_conn_params_lookup(hdev, addr, addr_type);
3518 if (params)
3519 return params;
3520
3521 params = kzalloc(sizeof(*params), GFP_KERNEL);
3522 if (!params) {
3523 bt_dev_err(hdev, "out of memory");
3524 return NULL;
3525 }
3526
3527 	bacpy(&params->addr, addr);
3528 params->addr_type = addr_type;
3529
3530 	list_add(&params->list, &hdev->le_conn_params);
3531 	INIT_LIST_HEAD(&params->action);
3532
3533 params->conn_min_interval = hdev->le_conn_min_interval;
3534 params->conn_max_interval = hdev->le_conn_max_interval;
3535 params->conn_latency = hdev->le_conn_latency;
3536 params->supervision_timeout = hdev->le_supv_timeout;
3537 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3538
3539 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3540
3541 return params;
3542 }
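/* Illustrative use of hci_conn_params_add() above (hdev->lock held): a
 * caller typically creates or looks up the entry and then adjusts the
 * auto-connect policy, e.g.
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */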
3543
3544 static void hci_conn_params_free(struct hci_conn_params *params)
3545 {
3546 if (params->conn) {
3547 hci_conn_drop(params->conn);
3548 hci_conn_put(params->conn);
3549 }
3550
3551 	list_del(&params->action);
3552 	list_del(&params->list);
3553 kfree(params);
3554 }
3555
3556 /* This function requires the caller holds hdev->lock */
3557 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3558 {
3559 struct hci_conn_params *params;
3560
3561 params = hci_conn_params_lookup(hdev, addr, addr_type);
3562 if (!params)
3563 return;
3564
3565 hci_conn_params_free(params);
3566
3567 hci_update_background_scan(hdev);
3568
3569 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3570 }
3571
3572 /* This function requires the caller holds hdev->lock */
3573 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3574 {
3575 struct hci_conn_params *params, *tmp;
3576
3577 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3578 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3579 continue;
3580
3581 		/* If a one-time connection to the disabled device is being
3582 		 * established, keep the params but mark them for explicit connect only.
3583 */
3584 if (params->explicit_connect) {
3585 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3586 continue;
3587 }
3588
3589 		list_del(&params->list);
3590 kfree(params);
3591 }
3592
3593 BT_DBG("All LE disabled connection parameters were removed");
3594 }
3595
3596 /* This function requires the caller holds hdev->lock */
3597 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3598 {
3599 struct hci_conn_params *params, *tmp;
3600
3601 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3602 hci_conn_params_free(params);
3603
3604 BT_DBG("All LE connection parameters were removed");
3605 }
3606
3607 /* Copy the Identity Address of the controller.
3608 *
3609 * If the controller has a public BD_ADDR, then by default use that one.
3610 * If this is a LE only controller without a public address, default to
3611 * the static random address.
3612 *
3613 * For debugging purposes it is possible to force controllers with a
3614 * public address to use the static random address instead.
3615 *
3616 * In case BR/EDR has been disabled on a dual-mode controller and
3617 * userspace has configured a static address, then that address
3618 * becomes the identity address instead of the public BR/EDR address.
3619 */
3620 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3621 u8 *bdaddr_type)
3622 {
3623 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3624 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3625 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3626 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3627 bacpy(bdaddr, &hdev->static_addr);
3628 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3629 } else {
3630 bacpy(bdaddr, &hdev->bdaddr);
3631 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3632 }
3633 }
3634
3635 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3636 {
3637 int i;
3638
3639 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3640 clear_bit(i, hdev->suspend_tasks);
3641
3642 wake_up(&hdev->suspend_wait_q);
3643 }
3644
3645 static int hci_suspend_wait_event(struct hci_dev *hdev)
3646 {
3647 #define WAKE_COND \
3648 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3649 __SUSPEND_NUM_TASKS)
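	/* WAKE_COND becomes true once every SUSPEND_* task bit has been
	 * cleared: find_first_bit() returns the bitmap size
	 * (__SUSPEND_NUM_TASKS) when no bit is set.
	 */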
3650
3651 int i;
3652 int ret = wait_event_timeout(hdev->suspend_wait_q,
3653 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3654
3655 if (ret == 0) {
3656 bt_dev_err(hdev, "Timed out waiting for suspend events");
3657 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3658 if (test_bit(i, hdev->suspend_tasks))
3659 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3660 clear_bit(i, hdev->suspend_tasks);
3661 }
3662
3663 ret = -ETIMEDOUT;
3664 } else {
3665 ret = 0;
3666 }
3667
3668 return ret;
3669 }
3670
3671 static void hci_prepare_suspend(struct work_struct *work)
3672 {
3673 struct hci_dev *hdev =
3674 container_of(work, struct hci_dev, suspend_prepare);
3675
3676 hci_dev_lock(hdev);
3677 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3678 hci_dev_unlock(hdev);
3679 }
3680
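/* Ask the suspend_prepare work to move the controller into the requested
 * suspended_state and block (with a timeout) until all pending SUSPEND_*
 * task bits, including SUSPEND_PREPARE_NOTIFIER, have been cleared again.
 */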
3681 static int hci_change_suspend_state(struct hci_dev *hdev,
3682 enum suspended_state next)
3683 {
3684 hdev->suspend_state_next = next;
3685 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3686 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3687 return hci_suspend_wait_event(hdev);
3688 }
3689
3690 static void hci_clear_wake_reason(struct hci_dev *hdev)
3691 {
3692 hci_dev_lock(hdev);
3693
3694 hdev->wake_reason = 0;
3695 bacpy(&hdev->wake_addr, BDADDR_ANY);
3696 hdev->wake_addr_type = 0;
3697
3698 hci_dev_unlock(hdev);
3699 }
3700
3701 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3702 void *data)
3703 {
3704 struct hci_dev *hdev =
3705 container_of(nb, struct hci_dev, suspend_notifier);
3706 int ret = 0;
3707 u8 state = BT_RUNNING;
3708
3709 /* If powering down, wait for completion. */
3710 if (mgmt_powering_down(hdev)) {
3711 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3712 ret = hci_suspend_wait_event(hdev);
3713 if (ret)
3714 goto done;
3715 }
3716
3717 /* Suspend notifier should only act on events when powered. */
3718 if (!hdev_is_powered(hdev) ||
3719 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3720 goto done;
3721
3722 if (action == PM_SUSPEND_PREPARE) {
3723 /* Suspend consists of two actions:
3724 * - First, disconnect everything and make the controller not
3725 * connectable (disabling scanning)
3726 * - Second, program event filter/accept list and enable scan
3727 */
3728 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3729 if (!ret)
3730 state = BT_SUSPEND_DISCONNECT;
3731
3732 /* Only configure accept list if disconnect succeeded and wake
3733 * isn't being prevented.
3734 */
3735 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3736 ret = hci_change_suspend_state(hdev,
3737 BT_SUSPEND_CONFIGURE_WAKE);
3738 if (!ret)
3739 state = BT_SUSPEND_CONFIGURE_WAKE;
3740 }
3741
3742 hci_clear_wake_reason(hdev);
3743 mgmt_suspending(hdev, state);
3744
3745 } else if (action == PM_POST_SUSPEND) {
3746 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3747
3748 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3749 hdev->wake_addr_type);
3750 }
3751
3752 done:
3753 /* We always allow suspend even if suspend preparation failed and
3754 * attempt to recover in resume.
3755 */
3756 if (ret)
3757 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3758 action, ret);
3759
3760 return NOTIFY_DONE;
3761 }
3762
3763 /* Alloc HCI device */
3764 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3765 {
3766 struct hci_dev *hdev;
3767 unsigned int alloc_size;
3768
3769 alloc_size = sizeof(*hdev);
3770 if (sizeof_priv) {
3771 /* Fixme: May need ALIGN-ment? */
3772 alloc_size += sizeof_priv;
3773 }
3774
3775 hdev = kzalloc(alloc_size, GFP_KERNEL);
3776 if (!hdev)
3777 return NULL;
3778
3779 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3780 hdev->esco_type = (ESCO_HV1);
3781 hdev->link_mode = (HCI_LM_ACCEPT);
3782 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3783 hdev->io_capability = 0x03; /* No Input No Output */
3784 hdev->manufacturer = 0xffff; /* Default to internal use */
3785 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3786 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3787 hdev->adv_instance_cnt = 0;
3788 hdev->cur_adv_instance = 0x00;
3789 hdev->adv_instance_timeout = 0;
3790
3791 hdev->advmon_allowlist_duration = 300;
3792 hdev->advmon_no_filter_duration = 500;
3793 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3794
3795 hdev->sniff_max_interval = 800;
3796 hdev->sniff_min_interval = 80;
3797
3798 hdev->le_adv_channel_map = 0x07;
3799 hdev->le_adv_min_interval = 0x0800;
3800 hdev->le_adv_max_interval = 0x0800;
3801 hdev->le_scan_interval = 0x0060;
3802 hdev->le_scan_window = 0x0030;
3803 hdev->le_scan_int_suspend = 0x0400;
3804 hdev->le_scan_window_suspend = 0x0012;
3805 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3806 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3807 hdev->le_scan_int_adv_monitor = 0x0060;
3808 hdev->le_scan_window_adv_monitor = 0x0030;
3809 hdev->le_scan_int_connect = 0x0060;
3810 hdev->le_scan_window_connect = 0x0060;
3811 hdev->le_conn_min_interval = 0x0018;
3812 hdev->le_conn_max_interval = 0x0028;
3813 hdev->le_conn_latency = 0x0000;
3814 hdev->le_supv_timeout = 0x002a;
3815 hdev->le_def_tx_len = 0x001b;
3816 hdev->le_def_tx_time = 0x0148;
3817 hdev->le_max_tx_len = 0x001b;
3818 hdev->le_max_tx_time = 0x0148;
3819 hdev->le_max_rx_len = 0x001b;
3820 hdev->le_max_rx_time = 0x0148;
3821 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3822 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3823 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3824 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3825 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3826 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3827 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3828 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3829 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3830
3831 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3832 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3833 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3834 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3835 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3836 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3837
3838 /* default 1.28 sec page scan */
3839 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3840 hdev->def_page_scan_int = 0x0800;
3841 hdev->def_page_scan_window = 0x0012;
3842
3843 mutex_init(&hdev->lock);
3844 mutex_init(&hdev->req_lock);
3845
3846 INIT_LIST_HEAD(&hdev->mgmt_pending);
3847 INIT_LIST_HEAD(&hdev->reject_list);
3848 INIT_LIST_HEAD(&hdev->accept_list);
3849 INIT_LIST_HEAD(&hdev->uuids);
3850 INIT_LIST_HEAD(&hdev->link_keys);
3851 INIT_LIST_HEAD(&hdev->long_term_keys);
3852 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3853 INIT_LIST_HEAD(&hdev->remote_oob_data);
3854 INIT_LIST_HEAD(&hdev->le_accept_list);
3855 INIT_LIST_HEAD(&hdev->le_resolv_list);
3856 INIT_LIST_HEAD(&hdev->le_conn_params);
3857 INIT_LIST_HEAD(&hdev->pend_le_conns);
3858 INIT_LIST_HEAD(&hdev->pend_le_reports);
3859 INIT_LIST_HEAD(&hdev->conn_hash.list);
3860 INIT_LIST_HEAD(&hdev->adv_instances);
3861 INIT_LIST_HEAD(&hdev->blocked_keys);
3862
3863 INIT_WORK(&hdev->rx_work, hci_rx_work);
3864 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3865 INIT_WORK(&hdev->tx_work, hci_tx_work);
3866 INIT_WORK(&hdev->power_on, hci_power_on);
3867 INIT_WORK(&hdev->error_reset, hci_error_reset);
3868 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3869
3870 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3871
3872 skb_queue_head_init(&hdev->rx_q);
3873 skb_queue_head_init(&hdev->cmd_q);
3874 skb_queue_head_init(&hdev->raw_q);
3875
3876 init_waitqueue_head(&hdev->req_wait_q);
3877 init_waitqueue_head(&hdev->suspend_wait_q);
3878
3879 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3880 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3881
3882 hci_request_setup(hdev);
3883
3884 hci_init_sysfs(hdev);
3885 discovery_init(hdev);
3886
3887 return hdev;
3888 }
3889 EXPORT_SYMBOL(hci_alloc_dev_priv);
3890
3891 /* Free HCI device */
3892 void hci_free_dev(struct hci_dev *hdev)
3893 {
3894 /* will free via device release */
3895 put_device(&hdev->dev);
3896 }
3897 EXPORT_SYMBOL(hci_free_dev);
3898
3899 /* Register HCI device */
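/* The driver must have set up the open, close and send callbacks before
 * registering. Illustrative driver flow (editor's sketch, not taken from
 * this file), with my_open/my_close/my_send standing in for driver code:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */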
3900 int hci_register_dev(struct hci_dev *hdev)
3901 {
3902 int id, error;
3903
3904 if (!hdev->open || !hdev->close || !hdev->send)
3905 return -EINVAL;
3906
3907 /* Do not allow HCI_AMP devices to register at index 0,
3908 * so the index can be used as the AMP controller ID.
3909 */
3910 switch (hdev->dev_type) {
3911 case HCI_PRIMARY:
3912 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3913 break;
3914 case HCI_AMP:
3915 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3916 break;
3917 default:
3918 return -EINVAL;
3919 }
3920
3921 if (id < 0)
3922 return id;
3923
3924 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3925 hdev->id = id;
3926
3927 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3928
3929 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3930 if (!hdev->workqueue) {
3931 error = -ENOMEM;
3932 goto err;
3933 }
3934
3935 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3936 hdev->name);
3937 if (!hdev->req_workqueue) {
3938 destroy_workqueue(hdev->workqueue);
3939 error = -ENOMEM;
3940 goto err;
3941 }
3942
3943 if (!IS_ERR_OR_NULL(bt_debugfs))
3944 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3945
3946 dev_set_name(&hdev->dev, "%s", hdev->name);
3947
3948 error = device_add(&hdev->dev);
3949 if (error < 0)
3950 goto err_wqueue;
3951
3952 hci_leds_init(hdev);
3953
3954 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3955 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3956 hdev);
3957 if (hdev->rfkill) {
3958 if (rfkill_register(hdev->rfkill) < 0) {
3959 rfkill_destroy(hdev->rfkill);
3960 hdev->rfkill = NULL;
3961 }
3962 }
3963
3964 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3965 hci_dev_set_flag(hdev, HCI_RFKILLED);
3966
3967 hci_dev_set_flag(hdev, HCI_SETUP);
3968 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3969
3970 if (hdev->dev_type == HCI_PRIMARY) {
3971 /* Assume BR/EDR support until proven otherwise (such as
3972 * through reading supported features during init).
3973 */
3974 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3975 }
3976
3977 write_lock(&hci_dev_list_lock);
3978 list_add(&hdev->list, &hci_dev_list);
3979 write_unlock(&hci_dev_list_lock);
3980
3981 /* Devices that are marked for raw-only usage are unconfigured
3982 * and should not be included in normal operation.
3983 */
3984 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3985 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3986
3987 hci_sock_dev_event(hdev, HCI_DEV_REG);
3988 hci_dev_hold(hdev);
3989
3990 if (!hdev->suspend_notifier.notifier_call &&
3991 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3992 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3993 error = register_pm_notifier(&hdev->suspend_notifier);
3994 if (error)
3995 goto err_wqueue;
3996 }
3997
3998 queue_work(hdev->req_workqueue, &hdev->power_on);
3999
4000 idr_init(&hdev->adv_monitors_idr);
4001
4002 return id;
4003
4004 err_wqueue:
4005 debugfs_remove_recursive(hdev->debugfs);
4006 destroy_workqueue(hdev->workqueue);
4007 destroy_workqueue(hdev->req_workqueue);
4008 err:
4009 ida_simple_remove(&hci_index_ida, hdev->id);
4010
4011 return error;
4012 }
4013 EXPORT_SYMBOL(hci_register_dev);
4014
4015 /* Unregister HCI device */
4016 void hci_unregister_dev(struct hci_dev *hdev)
4017 {
4018 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4019
4020 hci_dev_set_flag(hdev, HCI_UNREGISTER);
4021
4022 write_lock(&hci_dev_list_lock);
4023 list_del(&hdev->list);
4024 write_unlock(&hci_dev_list_lock);
4025
4026 cancel_work_sync(&hdev->power_on);
4027
4028 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4029 hci_suspend_clear_tasks(hdev);
4030 unregister_pm_notifier(&hdev->suspend_notifier);
4031 cancel_work_sync(&hdev->suspend_prepare);
4032 }
4033
4034 hci_dev_do_close(hdev);
4035
4036 if (!test_bit(HCI_INIT, &hdev->flags) &&
4037 !hci_dev_test_flag(hdev, HCI_SETUP) &&
4038 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4039 hci_dev_lock(hdev);
4040 mgmt_index_removed(hdev);
4041 hci_dev_unlock(hdev);
4042 }
4043
4044 /* mgmt_index_removed should take care of emptying the
4045 * pending list */
4046 BUG_ON(!list_empty(&hdev->mgmt_pending));
4047
4048 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4049
4050 if (hdev->rfkill) {
4051 rfkill_unregister(hdev->rfkill);
4052 rfkill_destroy(hdev->rfkill);
4053 }
4054
4055 device_del(&hdev->dev);
4056 /* Actual cleanup is deferred until hci_release_dev(). */
4057 hci_dev_put(hdev);
4058 }
4059 EXPORT_SYMBOL(hci_unregister_dev);
4060
4061 /* Release HCI device */
4062 void hci_release_dev(struct hci_dev *hdev)
4063 {
4064 debugfs_remove_recursive(hdev->debugfs);
4065 kfree_const(hdev->hw_info);
4066 kfree_const(hdev->fw_info);
4067
4068 destroy_workqueue(hdev->workqueue);
4069 destroy_workqueue(hdev->req_workqueue);
4070
4071 hci_dev_lock(hdev);
4072 hci_bdaddr_list_clear(&hdev->reject_list);
4073 hci_bdaddr_list_clear(&hdev->accept_list);
4074 hci_uuids_clear(hdev);
4075 hci_link_keys_clear(hdev);
4076 hci_smp_ltks_clear(hdev);
4077 hci_smp_irks_clear(hdev);
4078 hci_remote_oob_data_clear(hdev);
4079 hci_adv_instances_clear(hdev);
4080 hci_adv_monitors_clear(hdev);
4081 hci_bdaddr_list_clear(&hdev->le_accept_list);
4082 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4083 hci_conn_params_clear_all(hdev);
4084 hci_discovery_filter_clear(hdev);
4085 hci_blocked_keys_clear(hdev);
4086 hci_dev_unlock(hdev);
4087
4088 ida_simple_remove(&hci_index_ida, hdev->id);
4089 kfree_skb(hdev->sent_cmd);
4090 kfree(hdev);
4091 }
4092 EXPORT_SYMBOL(hci_release_dev);
4093
4094 /* Suspend HCI device */
4095 int hci_suspend_dev(struct hci_dev *hdev)
4096 {
4097 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4098 return 0;
4099 }
4100 EXPORT_SYMBOL(hci_suspend_dev);
4101
4102 /* Resume HCI device */
4103 int hci_resume_dev(struct hci_dev *hdev)
4104 {
4105 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4106 return 0;
4107 }
4108 EXPORT_SYMBOL(hci_resume_dev);
4109
4110 /* Reset HCI device */
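/* Called by drivers after a transport-level reset: a synthetic Hardware
 * Error event (HCI_EV_HARDWARE_ERROR, parameter length 1, code 0x00) is
 * injected into the RX path, from where the event handler queues the
 * error_reset work set up in hci_alloc_dev_priv().
 */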
4111 int hci_reset_dev(struct hci_dev *hdev)
4112 {
4113 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4114 struct sk_buff *skb;
4115
4116 skb = bt_skb_alloc(3, GFP_ATOMIC);
4117 if (!skb)
4118 return -ENOMEM;
4119
4120 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4121 skb_put_data(skb, hw_err, 3);
4122
4123 bt_dev_err(hdev, "Injecting HCI hardware error event");
4124
4125 /* Send Hardware Error to upper stack */
4126 return hci_recv_frame(hdev, skb);
4127 }
4128 EXPORT_SYMBOL(hci_reset_dev);
4129
4130 /* Receive frame from HCI drivers */
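/* The driver must set the packet type before handing the skb over.
 * Illustrative driver RX path (editor's sketch, not taken from this file),
 * where buf/count stand in for the transport's received payload:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	hci_recv_frame(hdev, skb);
 */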
4131 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4132 {
4133 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4134 && !test_bit(HCI_INIT, &hdev->flags))) {
4135 kfree_skb(skb);
4136 return -ENXIO;
4137 }
4138
4139 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4140 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4141 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4142 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4143 kfree_skb(skb);
4144 return -EINVAL;
4145 }
4146
4147 /* Incoming skb */
4148 bt_cb(skb)->incoming = 1;
4149
4150 /* Time stamp */
4151 __net_timestamp(skb);
4152
4153 skb_queue_tail(&hdev->rx_q, skb);
4154 queue_work(hdev->workqueue, &hdev->rx_work);
4155
4156 return 0;
4157 }
4158 EXPORT_SYMBOL(hci_recv_frame);
4159
4160 /* Receive diagnostic message from HCI drivers */
4161 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4162 {
4163 /* Mark as diagnostic packet */
4164 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4165
4166 /* Time stamp */
4167 __net_timestamp(skb);
4168
4169 skb_queue_tail(&hdev->rx_q, skb);
4170 queue_work(hdev->workqueue, &hdev->rx_work);
4171
4172 return 0;
4173 }
4174 EXPORT_SYMBOL(hci_recv_diag);
4175
4176 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4177 {
4178 va_list vargs;
4179
4180 va_start(vargs, fmt);
4181 kfree_const(hdev->hw_info);
4182 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4183 va_end(vargs);
4184 }
4185 EXPORT_SYMBOL(hci_set_hw_info);
4186
4187 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4188 {
4189 va_list vargs;
4190
4191 va_start(vargs, fmt);
4192 kfree_const(hdev->fw_info);
4193 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4194 va_end(vargs);
4195 }
4196 EXPORT_SYMBOL(hci_set_fw_info);
4197
4198 /* ---- Interface to upper protocols ---- */
4199
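/* Upper protocols such as L2CAP and SCO hook into connection state changes
 * by registering a struct hci_cb. Illustrative registration (editor's
 * sketch, not taken from this file), with my_connect_cfm/my_disconn_cfm
 * standing in for protocol handlers:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_cfm	= my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */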
4200 int hci_register_cb(struct hci_cb *cb)
4201 {
4202 BT_DBG("%p name %s", cb, cb->name);
4203
4204 mutex_lock(&hci_cb_list_lock);
4205 list_add_tail(&cb->list, &hci_cb_list);
4206 mutex_unlock(&hci_cb_list_lock);
4207
4208 return 0;
4209 }
4210 EXPORT_SYMBOL(hci_register_cb);
4211
4212 int hci_unregister_cb(struct hci_cb *cb)
4213 {
4214 BT_DBG("%p name %s", cb, cb->name);
4215
4216 mutex_lock(&hci_cb_list_lock);
4217 list_del(&cb->list);
4218 mutex_unlock(&hci_cb_list_lock);
4219
4220 return 0;
4221 }
4222 EXPORT_SYMBOL(hci_unregister_cb);
4223
4224 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4225 {
4226 int err;
4227
4228 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4229 skb->len);
4230
4231 /* Time stamp */
4232 __net_timestamp(skb);
4233
4234 /* Send copy to monitor */
4235 hci_send_to_monitor(hdev, skb);
4236
4237 if (atomic_read(&hdev->promisc)) {
4238 /* Send copy to the sockets */
4239 hci_send_to_sock(hdev, skb);
4240 }
4241
4242 /* Get rid of skb owner, prior to sending to the driver. */
4243 skb_orphan(skb);
4244
4245 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4246 kfree_skb(skb);
4247 return;
4248 }
4249
4250 err = hdev->send(hdev, skb);
4251 if (err < 0) {
4252 bt_dev_err(hdev, "sending frame failed (%d)", err);
4253 kfree_skb(skb);
4254 }
4255 }
4256
4257 /* Send HCI command */
4258 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4259 const void *param)
4260 {
4261 struct sk_buff *skb;
4262
4263 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4264
4265 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4266 if (!skb) {
4267 bt_dev_err(hdev, "no memory for command");
4268 return -ENOMEM;
4269 }
4270
4271 /* Stand-alone HCI commands must be flagged as
4272 * single-command requests.
4273 */
4274 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4275
4276 skb_queue_tail(&hdev->cmd_q, skb);
4277 queue_work(hdev->workqueue, &hdev->cmd_work);
4278
4279 return 0;
4280 }
4281
4282 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4283 const void *param)
4284 {
4285 struct sk_buff *skb;
4286
4287 if (hci_opcode_ogf(opcode) != 0x3f) {
4288 /* A controller receiving a command shall respond with either
4289 * a Command Status Event or a Command Complete Event.
4290 * Therefore, all standard HCI commands must be sent via the
4291 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4292 * Some vendors do not comply with this rule for vendor-specific
4293 * commands and do not return any event. We want to support
4294 * unresponded commands for such cases only.
4295 */
4296 bt_dev_err(hdev, "unresponded command not supported");
4297 return -EINVAL;
4298 }
4299
4300 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4301 if (!skb) {
4302 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4303 opcode);
4304 return -ENOMEM;
4305 }
4306
4307 hci_send_frame(hdev, skb);
4308
4309 return 0;
4310 }
4311 EXPORT_SYMBOL(__hci_cmd_send);
4312
4313 /* Get data from the previously sent command */
4314 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4315 {
4316 struct hci_command_hdr *hdr;
4317
4318 if (!hdev->sent_cmd)
4319 return NULL;
4320
4321 hdr = (void *) hdev->sent_cmd->data;
4322
4323 if (hdr->opcode != cpu_to_le16(opcode))
4324 return NULL;
4325
4326 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4327
4328 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4329 }
4330
4331 /* Send HCI command and wait for command complete event */
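/* Illustrative use (editor's sketch, not taken from this file): the helper
 * takes hci_req_sync_lock() internally and returns either the command
 * complete skb or an ERR_PTR.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */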
4332 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4333 const void *param, u32 timeout)
4334 {
4335 struct sk_buff *skb;
4336
4337 if (!test_bit(HCI_UP, &hdev->flags))
4338 return ERR_PTR(-ENETDOWN);
4339
4340 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4341
4342 hci_req_sync_lock(hdev);
4343 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4344 hci_req_sync_unlock(hdev);
4345
4346 return skb;
4347 }
4348 EXPORT_SYMBOL(hci_cmd_sync);
4349
4350 /* Send ACL data */
4351 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4352 {
4353 struct hci_acl_hdr *hdr;
4354 int len = skb->len;
4355
4356 skb_push(skb, HCI_ACL_HDR_SIZE);
4357 skb_reset_transport_header(skb);
4358 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4359 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4360 hdr->dlen = cpu_to_le16(len);
4361 }
4362
4363 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4364 struct sk_buff *skb, __u16 flags)
4365 {
4366 struct hci_conn *conn = chan->conn;
4367 struct hci_dev *hdev = conn->hdev;
4368 struct sk_buff *list;
4369
4370 skb->len = skb_headlen(skb);
4371 skb->data_len = 0;
4372
4373 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4374
4375 switch (hdev->dev_type) {
4376 case HCI_PRIMARY:
4377 hci_add_acl_hdr(skb, conn->handle, flags);
4378 break;
4379 case HCI_AMP:
4380 hci_add_acl_hdr(skb, chan->handle, flags);
4381 break;
4382 default:
4383 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4384 return;
4385 }
4386
4387 list = skb_shinfo(skb)->frag_list;
4388 if (!list) {
4389 /* Non fragmented */
4390 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4391
4392 skb_queue_tail(queue, skb);
4393 } else {
4394 /* Fragmented */
4395 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4396
4397 skb_shinfo(skb)->frag_list = NULL;
4398
4399 /* Queue all fragments atomically. We need to use spin_lock_bh
4400 * here because of 6LoWPAN links, as there this function is
4401 * called from softirq and using normal spin lock could cause
4402 * deadlocks.
4403 */
4404 spin_lock_bh(&queue->lock);
4405
4406 __skb_queue_tail(queue, skb);
4407
4408 flags &= ~ACL_START;
4409 flags |= ACL_CONT;
4410 do {
4411 skb = list; list = list->next;
4412
4413 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4414 hci_add_acl_hdr(skb, conn->handle, flags);
4415
4416 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4417
4418 __skb_queue_tail(queue, skb);
4419 } while (list);
4420
4421 spin_unlock_bh(&queue->lock);
4422 }
4423 }
4424
4425 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4426 {
4427 struct hci_dev *hdev = chan->conn->hdev;
4428
4429 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4430
4431 hci_queue_acl(chan, &chan->data_q, skb, flags);
4432
4433 queue_work(hdev->workqueue, &hdev->tx_work);
4434 }
4435
4436 /* Send SCO data */
4437 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4438 {
4439 struct hci_dev *hdev = conn->hdev;
4440 struct hci_sco_hdr hdr;
4441
4442 BT_DBG("%s len %d", hdev->name, skb->len);
4443
4444 hdr.handle = cpu_to_le16(conn->handle);
4445 hdr.dlen = skb->len;
4446
4447 skb_push(skb, HCI_SCO_HDR_SIZE);
4448 skb_reset_transport_header(skb);
4449 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4450
4451 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4452
4453 skb_queue_tail(&conn->data_q, skb);
4454 queue_work(hdev->workqueue, &hdev->tx_work);
4455 }
4456
4457 /* ---- HCI TX task (outgoing data) ---- */
4458
4459 /* HCI Connection scheduler */
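/* Pick the connection of the given link type with the fewest outstanding
 * packets and compute its quota: the controller's free buffer count
 * divided by the number of connections with queued data, but at least 1.
 */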
4460 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4461 int *quote)
4462 {
4463 struct hci_conn_hash *h = &hdev->conn_hash;
4464 struct hci_conn *conn = NULL, *c;
4465 unsigned int num = 0, min = ~0;
4466
4467 /* We don't have to lock device here. Connections are always
4468 * added and removed with TX task disabled. */
4469
4470 rcu_read_lock();
4471
4472 list_for_each_entry_rcu(c, &h->list, list) {
4473 if (c->type != type || skb_queue_empty(&c->data_q))
4474 continue;
4475
4476 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4477 continue;
4478
4479 num++;
4480
4481 if (c->sent < min) {
4482 min = c->sent;
4483 conn = c;
4484 }
4485
4486 if (hci_conn_num(hdev, type) == num)
4487 break;
4488 }
4489
4490 rcu_read_unlock();
4491
4492 if (conn) {
4493 int cnt, q;
4494
4495 switch (conn->type) {
4496 case ACL_LINK:
4497 cnt = hdev->acl_cnt;
4498 break;
4499 case SCO_LINK:
4500 case ESCO_LINK:
4501 cnt = hdev->sco_cnt;
4502 break;
4503 case LE_LINK:
4504 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4505 break;
4506 default:
4507 cnt = 0;
4508 bt_dev_err(hdev, "unknown link type %d", conn->type);
4509 }
4510
4511 q = cnt / num;
4512 *quote = q ? q : 1;
4513 } else
4514 *quote = 0;
4515
4516 BT_DBG("conn %p quote %d", conn, *quote);
4517 return conn;
4518 }
4519
4520 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4521 {
4522 struct hci_conn_hash *h = &hdev->conn_hash;
4523 struct hci_conn *c;
4524
4525 bt_dev_err(hdev, "link tx timeout");
4526
4527 rcu_read_lock();
4528
4529 /* Kill stalled connections */
4530 list_for_each_entry_rcu(c, &h->list, list) {
4531 if (c->type == type && c->sent) {
4532 bt_dev_err(hdev, "killing stalled connection %pMR",
4533 &c->dst);
4534 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4535 }
4536 }
4537
4538 rcu_read_unlock();
4539 }
4540
4541 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4542 int *quote)
4543 {
4544 struct hci_conn_hash *h = &hdev->conn_hash;
4545 struct hci_chan *chan = NULL;
4546 unsigned int num = 0, min = ~0, cur_prio = 0;
4547 struct hci_conn *conn;
4548 int cnt, q, conn_num = 0;
4549
4550 BT_DBG("%s", hdev->name);
4551
4552 rcu_read_lock();
4553
4554 list_for_each_entry_rcu(conn, &h->list, list) {
4555 struct hci_chan *tmp;
4556
4557 if (conn->type != type)
4558 continue;
4559
4560 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4561 continue;
4562
4563 conn_num++;
4564
4565 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4566 struct sk_buff *skb;
4567
4568 if (skb_queue_empty(&tmp->data_q))
4569 continue;
4570
4571 skb = skb_peek(&tmp->data_q);
4572 if (skb->priority < cur_prio)
4573 continue;
4574
4575 if (skb->priority > cur_prio) {
4576 num = 0;
4577 min = ~0;
4578 cur_prio = skb->priority;
4579 }
4580
4581 num++;
4582
4583 if (conn->sent < min) {
4584 min = conn->sent;
4585 chan = tmp;
4586 }
4587 }
4588
4589 if (hci_conn_num(hdev, type) == conn_num)
4590 break;
4591 }
4592
4593 rcu_read_unlock();
4594
4595 if (!chan)
4596 return NULL;
4597
4598 switch (chan->conn->type) {
4599 case ACL_LINK:
4600 cnt = hdev->acl_cnt;
4601 break;
4602 case AMP_LINK:
4603 cnt = hdev->block_cnt;
4604 break;
4605 case SCO_LINK:
4606 case ESCO_LINK:
4607 cnt = hdev->sco_cnt;
4608 break;
4609 case LE_LINK:
4610 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4611 break;
4612 default:
4613 cnt = 0;
4614 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4615 }
4616
4617 q = cnt / num;
4618 *quote = q ? q : 1;
4619 BT_DBG("chan %p quote %d", chan, *quote);
4620 return chan;
4621 }
4622
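/* Anti-starvation pass: channels that sent nothing during the last
 * scheduling round get the priority of their head skb promoted to
 * HCI_PRIO_MAX - 1, while channels that did send simply have their
 * per-round counter reset.
 */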
4623 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4624 {
4625 struct hci_conn_hash *h = &hdev->conn_hash;
4626 struct hci_conn *conn;
4627 int num = 0;
4628
4629 BT_DBG("%s", hdev->name);
4630
4631 rcu_read_lock();
4632
4633 list_for_each_entry_rcu(conn, &h->list, list) {
4634 struct hci_chan *chan;
4635
4636 if (conn->type != type)
4637 continue;
4638
4639 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4640 continue;
4641
4642 num++;
4643
4644 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4645 struct sk_buff *skb;
4646
4647 if (chan->sent) {
4648 chan->sent = 0;
4649 continue;
4650 }
4651
4652 if (skb_queue_empty(&chan->data_q))
4653 continue;
4654
4655 skb = skb_peek(&chan->data_q);
4656 if (skb->priority >= HCI_PRIO_MAX - 1)
4657 continue;
4658
4659 skb->priority = HCI_PRIO_MAX - 1;
4660
4661 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4662 skb->priority);
4663 }
4664
4665 if (hci_conn_num(hdev, type) == num)
4666 break;
4667 }
4668
4669 rcu_read_unlock();
4670
4671 }
4672
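/* For block-based flow control the payload (skb length minus the ACL
 * header) is rounded up to whole controller blocks; for example, with
 * block_len == 64 a 100-byte payload needs DIV_ROUND_UP(100, 64) == 2
 * blocks.
 */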
4673 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4674 {
4675 /* Calculate count of blocks used by this packet */
4676 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4677 }
4678
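/* If the controller has had no free buffers (cnt == 0) for longer than
 * HCI_ACL_TX_TIMEOUT since the last transmission of this link type, assume
 * the link is stalled and tear down its connections via hci_link_tx_to().
 * Unconfigured controllers are exempt from this check.
 */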
4679 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4680 {
4681 unsigned long last_tx;
4682
4683 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4684 return;
4685
4686 switch (type) {
4687 case LE_LINK:
4688 last_tx = hdev->le_last_tx;
4689 break;
4690 default:
4691 last_tx = hdev->acl_last_tx;
4692 break;
4693 }
4694
4695 /* tx timeout must be longer than maximum link supervision timeout
4696 * (40.9 seconds)
4697 */
4698 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4699 hci_link_tx_to(hdev, type);
4700 }
4701
4702 /* Schedule SCO */
4703 static void hci_sched_sco(struct hci_dev *hdev)
4704 {
4705 struct hci_conn *conn;
4706 struct sk_buff *skb;
4707 int quote;
4708
4709 BT_DBG("%s", hdev->name);
4710
4711 if (!hci_conn_num(hdev, SCO_LINK))
4712 return;
4713
4714 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4715 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4716 BT_DBG("skb %p len %d", skb, skb->len);
4717 hci_send_frame(hdev, skb);
4718
4719 conn->sent++;
4720 if (conn->sent == ~0)
4721 conn->sent = 0;
4722 }
4723 }
4724 }
4725
4726 static void hci_sched_esco(struct hci_dev *hdev)
4727 {
4728 struct hci_conn *conn;
4729 struct sk_buff *skb;
4730 int quote;
4731
4732 BT_DBG("%s", hdev->name);
4733
4734 if (!hci_conn_num(hdev, ESCO_LINK))
4735 return;
4736
4737 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4738 "e))) {
4739 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4740 BT_DBG("skb %p len %d", skb, skb->len);
4741 hci_send_frame(hdev, skb);
4742
4743 conn->sent++;
4744 if (conn->sent == ~0)
4745 conn->sent = 0;
4746 }
4747 }
4748 }
4749
4750 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4751 {
4752 unsigned int cnt = hdev->acl_cnt;
4753 struct hci_chan *chan;
4754 struct sk_buff *skb;
4755 int quote;
4756
4757 __check_timeout(hdev, cnt, ACL_LINK);
4758
4759 while (hdev->acl_cnt &&
4760 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4761 u32 priority = (skb_peek(&chan->data_q))->priority;
4762 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4763 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4764 skb->len, skb->priority);
4765
4766 /* Stop if priority has changed */
4767 if (skb->priority < priority)
4768 break;
4769
4770 skb = skb_dequeue(&chan->data_q);
4771
4772 hci_conn_enter_active_mode(chan->conn,
4773 bt_cb(skb)->force_active);
4774
4775 hci_send_frame(hdev, skb);
4776 hdev->acl_last_tx = jiffies;
4777
4778 hdev->acl_cnt--;
4779 chan->sent++;
4780 chan->conn->sent++;
4781
4782 /* Send pending SCO packets right away */
4783 hci_sched_sco(hdev);
4784 hci_sched_esco(hdev);
4785 }
4786 }
4787
4788 if (cnt != hdev->acl_cnt)
4789 hci_prio_recalculate(hdev, ACL_LINK);
4790 }
4791
4792 static void hci_sched_acl_blk(struct hci_dev *hdev)
4793 {
4794 unsigned int cnt = hdev->block_cnt;
4795 struct hci_chan *chan;
4796 struct sk_buff *skb;
4797 int quote;
4798 u8 type;
4799
4800 BT_DBG("%s", hdev->name);
4801
4802 if (hdev->dev_type == HCI_AMP)
4803 type = AMP_LINK;
4804 else
4805 type = ACL_LINK;
4806
4807 __check_timeout(hdev, cnt, type);
4808
4809 while (hdev->block_cnt > 0 &&
4810 (chan = hci_chan_sent(hdev, type, &quote))) {
4811 u32 priority = (skb_peek(&chan->data_q))->priority;
4812 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4813 int blocks;
4814
4815 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4816 skb->len, skb->priority);
4817
4818 /* Stop if priority has changed */
4819 if (skb->priority < priority)
4820 break;
4821
4822 skb = skb_dequeue(&chan->data_q);
4823
4824 blocks = __get_blocks(hdev, skb);
4825 if (blocks > hdev->block_cnt)
4826 return;
4827
4828 hci_conn_enter_active_mode(chan->conn,
4829 bt_cb(skb)->force_active);
4830
4831 hci_send_frame(hdev, skb);
4832 hdev->acl_last_tx = jiffies;
4833
4834 hdev->block_cnt -= blocks;
4835 quote -= blocks;
4836
4837 chan->sent += blocks;
4838 chan->conn->sent += blocks;
4839 }
4840 }
4841
4842 if (cnt != hdev->block_cnt)
4843 hci_prio_recalculate(hdev, type);
4844 }
4845
4846 static void hci_sched_acl(struct hci_dev *hdev)
4847 {
4848 BT_DBG("%s", hdev->name);
4849
4850 /* No ACL link over BR/EDR controller */
4851 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4852 return;
4853
4854 /* No AMP link over AMP controller */
4855 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4856 return;
4857
4858 switch (hdev->flow_ctl_mode) {
4859 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4860 hci_sched_acl_pkt(hdev);
4861 break;
4862
4863 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4864 hci_sched_acl_blk(hdev);
4865 break;
4866 }
4867 }
4868
4869 static void hci_sched_le(struct hci_dev *hdev)
4870 {
4871 struct hci_chan *chan;
4872 struct sk_buff *skb;
4873 int quote, cnt, tmp;
4874
4875 BT_DBG("%s", hdev->name);
4876
4877 if (!hci_conn_num(hdev, LE_LINK))
4878 return;
4879
4880 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4881
4882 __check_timeout(hdev, cnt, LE_LINK);
4883
4884 tmp = cnt;
4885 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4886 u32 priority = (skb_peek(&chan->data_q))->priority;
4887 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4888 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4889 skb->len, skb->priority);
4890
4891 /* Stop if priority has changed */
4892 if (skb->priority < priority)
4893 break;
4894
4895 skb = skb_dequeue(&chan->data_q);
4896
4897 hci_send_frame(hdev, skb);
4898 hdev->le_last_tx = jiffies;
4899
4900 cnt--;
4901 chan->sent++;
4902 chan->conn->sent++;
4903
4904 /* Send pending SCO packets right away */
4905 hci_sched_sco(hdev);
4906 hci_sched_esco(hdev);
4907 }
4908 }
4909
4910 if (hdev->le_pkts)
4911 hdev->le_cnt = cnt;
4912 else
4913 hdev->acl_cnt = cnt;
4914
4915 if (cnt != tmp)
4916 hci_prio_recalculate(hdev, LE_LINK);
4917 }
4918
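/* TX worker: unless the device is in user channel mode, service the
 * schedulers in priority order (SCO, eSCO, ACL, LE) and then flush any
 * queued raw packets straight to the driver.
 */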
4919 static void hci_tx_work(struct work_struct *work)
4920 {
4921 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4922 struct sk_buff *skb;
4923
4924 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4925 hdev->sco_cnt, hdev->le_cnt);
4926
4927 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4928 /* Schedule queues and send stuff to HCI driver */
4929 hci_sched_sco(hdev);
4930 hci_sched_esco(hdev);
4931 hci_sched_acl(hdev);
4932 hci_sched_le(hdev);
4933 }
4934
4935 /* Send next queued raw (unknown type) packet */
4936 while ((skb = skb_dequeue(&hdev->raw_q)))
4937 hci_send_frame(hdev, skb);
4938 }
4939
4940 /* ----- HCI RX task (incoming data processing) ----- */
4941
4942 /* ACL data packet */
4943 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4944 {
4945 struct hci_acl_hdr *hdr = (void *) skb->data;
4946 struct hci_conn *conn;
4947 __u16 handle, flags;
4948
4949 skb_pull(skb, HCI_ACL_HDR_SIZE);
4950
4951 handle = __le16_to_cpu(hdr->handle);
4952 flags = hci_flags(handle);
4953 handle = hci_handle(handle);
4954
4955 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4956 handle, flags);
4957
4958 hdev->stat.acl_rx++;
4959
4960 hci_dev_lock(hdev);
4961 conn = hci_conn_hash_lookup_handle(hdev, handle);
4962 hci_dev_unlock(hdev);
4963
4964 if (conn) {
4965 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4966
4967 /* Send to upper protocol */
4968 l2cap_recv_acldata(conn, skb, flags);
4969 return;
4970 } else {
4971 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4972 handle);
4973 }
4974
4975 kfree_skb(skb);
4976 }
4977
4978 /* SCO data packet */
4979 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4980 {
4981 struct hci_sco_hdr *hdr = (void *) skb->data;
4982 struct hci_conn *conn;
4983 __u16 handle, flags;
4984
4985 skb_pull(skb, HCI_SCO_HDR_SIZE);
4986
4987 handle = __le16_to_cpu(hdr->handle);
4988 flags = hci_flags(handle);
4989 handle = hci_handle(handle);
4990
4991 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4992 handle, flags);
4993
4994 hdev->stat.sco_rx++;
4995
4996 hci_dev_lock(hdev);
4997 conn = hci_conn_hash_lookup_handle(hdev, handle);
4998 hci_dev_unlock(hdev);
4999
5000 if (conn) {
5001 /* Send to upper protocol */
5002 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5003 sco_recv_scodata(conn, skb);
5004 return;
5005 } else {
5006 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
5007 handle);
5008 }
5009
5010 kfree_skb(skb);
5011 }
5012
5013 static bool hci_req_is_complete(struct hci_dev *hdev)
5014 {
5015 struct sk_buff *skb;
5016
5017 skb = skb_peek(&hdev->cmd_q);
5018 if (!skb)
5019 return true;
5020
5021 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5022 }
5023
5024 static void hci_resend_last(struct hci_dev *hdev)
5025 {
5026 struct hci_command_hdr *sent;
5027 struct sk_buff *skb;
5028 u16 opcode;
5029
5030 if (!hdev->sent_cmd)
5031 return;
5032
5033 sent = (void *) hdev->sent_cmd->data;
5034 opcode = __le16_to_cpu(sent->opcode);
5035 if (opcode == HCI_OP_RESET)
5036 return;
5037
5038 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5039 if (!skb)
5040 return;
5041
5042 skb_queue_head(&hdev->cmd_q, skb);
5043 queue_work(hdev->workqueue, &hdev->cmd_work);
5044 }
5045
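/* Resolve the completion callback of the request that the completed opcode
 * belongs to. Events that do not match hdev->sent_cmd are ignored (apart
 * from the CSR spontaneous-reset workaround); otherwise the callback is
 * taken from sent_cmd or, failing that, from the remaining queued commands
 * of the request, which are then dropped from cmd_q.
 */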
5046 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5047 hci_req_complete_t *req_complete,
5048 hci_req_complete_skb_t *req_complete_skb)
5049 {
5050 struct sk_buff *skb;
5051 unsigned long flags;
5052
5053 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5054
5055 /* If the completed command doesn't match the last one that was
5056 * sent we need to do special handling of it.
5057 */
5058 if (!hci_sent_cmd_data(hdev, opcode)) {
5059 /* Some CSR based controllers generate a spontaneous
5060 * reset complete event during init and any pending
5061 * command will never be completed. In such a case we
5062 * need to resend whatever was the last sent
5063 * command.
5064 */
5065 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5066 hci_resend_last(hdev);
5067
5068 return;
5069 }
5070
5071 /* If we reach this point this event matches the last command sent */
5072 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5073
5074 /* If the command succeeded and there are still more commands in
5075 * this request, the request is not yet complete.
5076 */
5077 if (!status && !hci_req_is_complete(hdev))
5078 return;
5079
5080 /* If this was the last command in a request the complete
5081 * callback would be found in hdev->sent_cmd instead of the
5082 * command queue (hdev->cmd_q).
5083 */
5084 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5085 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5086 return;
5087 }
5088
5089 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5090 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5091 return;
5092 }
5093
5094 /* Remove all pending commands belonging to this request */
5095 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5096 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5097 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5098 __skb_queue_head(&hdev->cmd_q, skb);
5099 break;
5100 }
5101
5102 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5103 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5104 else
5105 *req_complete = bt_cb(skb)->hci.req_complete;
5106 dev_kfree_skb_irq(skb);
5107 }
5108 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5109 }
5110
5111 static void hci_rx_work(struct work_struct *work)
5112 {
5113 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5114 struct sk_buff *skb;
5115
5116 BT_DBG("%s", hdev->name);
5117
5118 while ((skb = skb_dequeue(&hdev->rx_q))) {
5119 /* Send copy to monitor */
5120 hci_send_to_monitor(hdev, skb);
5121
5122 if (atomic_read(&hdev->promisc)) {
5123 /* Send copy to the sockets */
5124 hci_send_to_sock(hdev, skb);
5125 }
5126
5127 /* If the device has been opened in HCI_USER_CHANNEL,
5128 * userspace has exclusive access to the device.
5129 * When the device is in HCI_INIT, we still need to
5130 * process the incoming packets so that the driver
5131 * can complete its setup().
5132 */
5133 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5134 !test_bit(HCI_INIT, &hdev->flags)) {
5135 kfree_skb(skb);
5136 continue;
5137 }
5138
5139 if (test_bit(HCI_INIT, &hdev->flags)) {
5140 /* Don't process data packets in these states. */
5141 switch (hci_skb_pkt_type(skb)) {
5142 case HCI_ACLDATA_PKT:
5143 case HCI_SCODATA_PKT:
5144 case HCI_ISODATA_PKT:
5145 kfree_skb(skb);
5146 continue;
5147 }
5148 }
5149
5150 /* Process frame */
5151 switch (hci_skb_pkt_type(skb)) {
5152 case HCI_EVENT_PKT:
5153 BT_DBG("%s Event packet", hdev->name);
5154 hci_event_packet(hdev, skb);
5155 break;
5156
5157 case HCI_ACLDATA_PKT:
5158 BT_DBG("%s ACL data packet", hdev->name);
5159 hci_acldata_packet(hdev, skb);
5160 break;
5161
5162 case HCI_SCODATA_PKT:
5163 BT_DBG("%s SCO data packet", hdev->name);
5164 hci_scodata_packet(hdev, skb);
5165 break;
5166
5167 default:
5168 kfree_skb(skb);
5169 break;
5170 }
5171 }
5172 }
5173
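/* Command worker: if the controller has a free command slot (cmd_cnt),
 * dequeue the next queued command, keep a clone in hdev->sent_cmd for
 * completion matching, transmit it and arm cmd_timer unless a reset is in
 * flight.
 */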
5174 static void hci_cmd_work(struct work_struct *work)
5175 {
5176 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5177 struct sk_buff *skb;
5178
5179 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5180 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5181
5182 /* Send queued commands */
5183 if (atomic_read(&hdev->cmd_cnt)) {
5184 skb = skb_dequeue(&hdev->cmd_q);
5185 if (!skb)
5186 return;
5187
5188 kfree_skb(hdev->sent_cmd);
5189
5190 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5191 if (hdev->sent_cmd) {
5192 if (hci_req_status_pend(hdev))
5193 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5194 atomic_dec(&hdev->cmd_cnt);
5195 hci_send_frame(hdev, skb);
5196 if (test_bit(HCI_RESET, &hdev->flags))
5197 cancel_delayed_work(&hdev->cmd_timer);
5198 else
5199 schedule_delayed_work(&hdev->cmd_timer,
5200 HCI_CMD_TIMEOUT);
5201 } else {
5202 skb_queue_head(&hdev->cmd_q, skb);
5203 queue_work(hdev->workqueue, &hdev->cmd_work);
5204 }
5205 }
5206 }
5207