1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <asm/unaligned.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/mgmt.h>
40 
41 #include "hci_request.h"
42 #include "hci_debugfs.h"
43 #include "smp.h"
44 #include "leds.h"
45 
46 static void hci_rx_work(struct work_struct *work);
47 static void hci_cmd_work(struct work_struct *work);
48 static void hci_tx_work(struct work_struct *work);
49 
50 /* HCI device list */
51 LIST_HEAD(hci_dev_list);
52 DEFINE_RWLOCK(hci_dev_list_lock);
53 
54 /* HCI callback list */
55 LIST_HEAD(hci_cb_list);
56 DEFINE_MUTEX(hci_cb_list_lock);
57 
58 /* HCI ID Numbering */
59 static DEFINE_IDA(hci_index_ida);
60 
61 /* ---- HCI debugfs entries ---- */
62 
63 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
64 			     size_t count, loff_t *ppos)
65 {
66 	struct hci_dev *hdev = file->private_data;
67 	char buf[3];
68 
69 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
70 	buf[1] = '\n';
71 	buf[2] = '\0';
72 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 }
74 
75 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
76 			      size_t count, loff_t *ppos)
77 {
78 	struct hci_dev *hdev = file->private_data;
79 	struct sk_buff *skb;
80 	bool enable;
81 	int err;
82 
83 	if (!test_bit(HCI_UP, &hdev->flags))
84 		return -ENETDOWN;
85 
86 	err = kstrtobool_from_user(user_buf, count, &enable);
87 	if (err)
88 		return err;
89 
90 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
91 		return -EALREADY;
92 
93 	hci_req_sync_lock(hdev);
94 	if (enable)
95 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
96 				     HCI_CMD_TIMEOUT);
97 	else
98 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	hci_req_sync_unlock(hdev);
101 
102 	if (IS_ERR(skb))
103 		return PTR_ERR(skb);
104 
105 	kfree_skb(skb);
106 
107 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
108 
109 	return count;
110 }
111 
112 static const struct file_operations dut_mode_fops = {
113 	.open		= simple_open,
114 	.read		= dut_mode_read,
115 	.write		= dut_mode_write,
116 	.llseek		= default_llseek,
117 };
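/* Example usage of the resulting debugfs file (assuming debugfs is
 * mounted at /sys/kernel/debug and the adapter is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing issues HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET when leaving
 * DUT mode) and fails with -ENETDOWN while the device is not up.
 */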
118 
119 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
120 				size_t count, loff_t *ppos)
121 {
122 	struct hci_dev *hdev = file->private_data;
123 	char buf[3];
124 
125 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
126 	buf[1] = '\n';
127 	buf[2] = '\0';
128 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
129 }
130 
131 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
132 				 size_t count, loff_t *ppos)
133 {
134 	struct hci_dev *hdev = file->private_data;
135 	bool enable;
136 	int err;
137 
138 	err = kstrtobool_from_user(user_buf, count, &enable);
139 	if (err)
140 		return err;
141 
142 	/* When the diagnostic flags are not persistent and the transport
143 	 * is not active or in user channel operation, there is no need
144 	 * for the vendor callback. Instead just store the desired value and
145 	 * the setting will be programmed when the controller gets powered on.
146 	 */
147 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
148 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
149 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
150 		goto done;
151 
152 	hci_req_sync_lock(hdev);
153 	err = hdev->set_diag(hdev, enable);
154 	hci_req_sync_unlock(hdev);
155 
156 	if (err < 0)
157 		return err;
158 
159 done:
160 	if (enable)
161 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
162 	else
163 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
164 
165 	return count;
166 }
167 
168 static const struct file_operations vendor_diag_fops = {
169 	.open		= simple_open,
170 	.read		= vendor_diag_read,
171 	.write		= vendor_diag_write,
172 	.llseek		= default_llseek,
173 };
174 
175 static void hci_debugfs_create_basic(struct hci_dev *hdev)
176 {
177 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
178 			    &dut_mode_fops);
179 
180 	if (hdev->set_diag)
181 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
182 				    &vendor_diag_fops);
183 }
184 
185 static int hci_reset_req(struct hci_request *req, unsigned long opt)
186 {
187 	BT_DBG("%s %ld", req->hdev->name, opt);
188 
189 	/* Reset device */
190 	set_bit(HCI_RESET, &req->hdev->flags);
191 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
192 	return 0;
193 }
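/* A note on the request API used throughout this file: hci_req_add()
 * only queues a command on the request; nothing is sent until the
 * request is run. A minimal sketch (complete_cb is a hypothetical
 * completion callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, complete_cb);
 */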
194 
195 static void bredr_init(struct hci_request *req)
196 {
197 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 
199 	/* Read Local Supported Features */
200 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
201 
202 	/* Read Local Version */
203 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204 
205 	/* Read BD Address */
206 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
207 }
208 
209 static void amp_init1(struct hci_request *req)
210 {
211 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
212 
213 	/* Read Local Version */
214 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 
216 	/* Read Local Supported Commands */
217 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
218 
219 	/* Read Local AMP Info */
220 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
221 
222 	/* Read Data Blk size */
223 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
224 
225 	/* Read Flow Control Mode */
226 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
227 
228 	/* Read Location Data */
229 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
230 }
231 
232 static int amp_init2(struct hci_request *req)
233 {
234 	/* Read Local Supported Features. Not all AMP controllers
235 	 * support this so it's placed conditionally in the second
236 	 * stage init.
237 	 */
238 	if (req->hdev->commands[14] & 0x20)
239 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
240 
241 	return 0;
242 }
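/* The hdev->commands[] tests in this file index the bitmask returned
 * by Read Local Supported Commands: octet n, bit b of that bitmask is
 * hdev->commands[n] & (1 << b). The check above, commands[14] & 0x20,
 * therefore tests octet 14 bit 5, which indicates support for the
 * Read Local Supported Features command.
 */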
243 
244 static int hci_init1_req(struct hci_request *req, unsigned long opt)
245 {
246 	struct hci_dev *hdev = req->hdev;
247 
248 	BT_DBG("%s %ld", hdev->name, opt);
249 
250 	/* Reset */
251 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
252 		hci_reset_req(req, 0);
253 
254 	switch (hdev->dev_type) {
255 	case HCI_PRIMARY:
256 		bredr_init(req);
257 		break;
258 	case HCI_AMP:
259 		amp_init1(req);
260 		break;
261 	default:
262 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
263 		break;
264 	}
265 
266 	return 0;
267 }
268 
269 static void bredr_setup(struct hci_request *req)
270 {
271 	__le16 param;
272 	__u8 flt_type;
273 
274 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
275 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
276 
277 	/* Read Class of Device */
278 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
279 
280 	/* Read Local Name */
281 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
282 
283 	/* Read Voice Setting */
284 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
285 
286 	/* Read Number of Supported IAC */
287 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
288 
289 	/* Read Current IAC LAP */
290 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
291 
292 	/* Clear Event Filters */
293 	flt_type = HCI_FLT_CLEAR_ALL;
294 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
295 
296 	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms = 20 s) */
297 	param = cpu_to_le16(0x7d00);
298 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
299 }
300 
301 static void le_setup(struct hci_request *req)
302 {
303 	struct hci_dev *hdev = req->hdev;
304 
305 	/* Read LE Buffer Size */
306 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307 
308 	/* Read LE Local Supported Features */
309 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
310 
311 	/* Read LE Supported States */
312 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
313 
314 	/* LE-only controllers have LE implicitly enabled */
315 	if (!lmp_bredr_capable(hdev))
316 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
317 }
318 
319 static void hci_setup_event_mask(struct hci_request *req)
320 {
321 	struct hci_dev *hdev = req->hdev;
322 
323 	/* The second byte is 0xff instead of 0x9f (two reserved bits
324 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
325 	 * command otherwise.
326 	 */
327 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
328 
329 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
330 	 * any event mask for pre 1.2 devices.
331 	 */
332 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
333 		return;
334 
335 	if (lmp_bredr_capable(hdev)) {
336 		events[4] |= 0x01; /* Flow Specification Complete */
337 	} else {
338 		/* Use a different default for LE-only devices */
339 		memset(events, 0, sizeof(events));
340 		events[1] |= 0x20; /* Command Complete */
341 		events[1] |= 0x40; /* Command Status */
342 		events[1] |= 0x80; /* Hardware Error */
343 
344 		/* If the controller supports the Disconnect command, enable
345 		 * the corresponding event. In addition enable packet flow
346 		 * control related events.
347 		 */
348 		if (hdev->commands[0] & 0x20) {
349 			events[0] |= 0x10; /* Disconnection Complete */
350 			events[2] |= 0x04; /* Number of Completed Packets */
351 			events[3] |= 0x02; /* Data Buffer Overflow */
352 		}
353 
354 		/* If the controller supports the Read Remote Version
355 		 * Information command, enable the corresponding event.
356 		 */
357 		if (hdev->commands[2] & 0x80)
358 			events[1] |= 0x08; /* Read Remote Version Information
359 					    * Complete
360 					    */
361 
362 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
363 			events[0] |= 0x80; /* Encryption Change */
364 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
365 		}
366 	}
367 
368 	if (lmp_inq_rssi_capable(hdev) ||
369 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
370 		events[4] |= 0x02; /* Inquiry Result with RSSI */
371 
372 	if (lmp_ext_feat_capable(hdev))
373 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
374 
375 	if (lmp_esco_capable(hdev)) {
376 		events[5] |= 0x08; /* Synchronous Connection Complete */
377 		events[5] |= 0x10; /* Synchronous Connection Changed */
378 	}
379 
380 	if (lmp_sniffsubr_capable(hdev))
381 		events[5] |= 0x20; /* Sniff Subrating */
382 
383 	if (lmp_pause_enc_capable(hdev))
384 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
385 
386 	if (lmp_ext_inq_capable(hdev))
387 		events[5] |= 0x40; /* Extended Inquiry Result */
388 
389 	if (lmp_no_flush_capable(hdev))
390 		events[7] |= 0x01; /* Enhanced Flush Complete */
391 
392 	if (lmp_lsto_capable(hdev))
393 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
394 
395 	if (lmp_ssp_capable(hdev)) {
396 		events[6] |= 0x01;	/* IO Capability Request */
397 		events[6] |= 0x02;	/* IO Capability Response */
398 		events[6] |= 0x04;	/* User Confirmation Request */
399 		events[6] |= 0x08;	/* User Passkey Request */
400 		events[6] |= 0x10;	/* Remote OOB Data Request */
401 		events[6] |= 0x20;	/* Simple Pairing Complete */
402 		events[7] |= 0x04;	/* User Passkey Notification */
403 		events[7] |= 0x08;	/* Keypress Notification */
404 		events[7] |= 0x10;	/* Remote Host Supported
405 					 * Features Notification
406 					 */
407 	}
408 
409 	if (lmp_le_capable(hdev))
410 		events[7] |= 0x20;	/* LE Meta-Event */
411 
412 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
413 }
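/* For reference, the mask bit for an HCI event with code E is bit
 * (E - 1), i.e. byte (E - 1) / 8, bit (E - 1) % 8. As a worked
 * example, Command Complete (0x0e) maps to bit 13, which is bit 5 of
 * byte 1, matching events[1] |= 0x20 above.
 */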
414 
415 static int hci_init2_req(struct hci_request *req, unsigned long opt)
416 {
417 	struct hci_dev *hdev = req->hdev;
418 
419 	if (hdev->dev_type == HCI_AMP)
420 		return amp_init2(req);
421 
422 	if (lmp_bredr_capable(hdev))
423 		bredr_setup(req);
424 	else
425 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
426 
427 	if (lmp_le_capable(hdev))
428 		le_setup(req);
429 
430 	/* All Bluetooth 1.2 and later controllers should support the
431 	 * HCI command for reading the local supported commands.
432 	 *
433 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
434 	 * but do not have support for this command. If that is the case,
435 	 * the driver can quirk the behavior and skip reading the local
436 	 * supported commands.
437 	 */
438 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
439 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
440 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
441 
442 	if (lmp_ssp_capable(hdev)) {
443 		/* When SSP is available, the host features page
444 		 * should also be available. However some
445 		 * controllers list the max_page as 0 as long as SSP
446 		 * has not been enabled. To achieve proper debugging
447 		 * output, force the minimum max_page to 1 at least.
448 		 */
449 		hdev->max_page = 0x01;
450 
451 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
452 			u8 mode = 0x01;
453 
454 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
455 				    sizeof(mode), &mode);
456 		} else {
457 			struct hci_cp_write_eir cp;
458 
459 			memset(hdev->eir, 0, sizeof(hdev->eir));
460 			memset(&cp, 0, sizeof(cp));
461 
462 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
463 		}
464 	}
465 
466 	if (lmp_inq_rssi_capable(hdev) ||
467 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
468 		u8 mode;
469 
470 		/* If Extended Inquiry Result events are supported, then
471 		 * they are clearly preferred over Inquiry Result with RSSI
472 		 * events.
473 		 */
474 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475 
476 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
477 	}
478 
479 	if (lmp_inq_tx_pwr_capable(hdev))
480 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481 
482 	if (lmp_ext_feat_capable(hdev)) {
483 		struct hci_cp_read_local_ext_features cp;
484 
485 		cp.page = 0x01;
486 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
487 			    sizeof(cp), &cp);
488 	}
489 
490 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 		u8 enable = 1;
492 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
493 			    &enable);
494 	}
495 
496 	return 0;
497 }
498 
499 static void hci_setup_link_policy(struct hci_request *req)
500 {
501 	struct hci_dev *hdev = req->hdev;
502 	struct hci_cp_write_def_link_policy cp;
503 	u16 link_policy = 0;
504 
505 	if (lmp_rswitch_capable(hdev))
506 		link_policy |= HCI_LP_RSWITCH;
507 	if (lmp_hold_capable(hdev))
508 		link_policy |= HCI_LP_HOLD;
509 	if (lmp_sniff_capable(hdev))
510 		link_policy |= HCI_LP_SNIFF;
511 	if (lmp_park_capable(hdev))
512 		link_policy |= HCI_LP_PARK;
513 
514 	cp.policy = cpu_to_le16(link_policy);
515 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
516 }
517 
518 static void hci_set_le_support(struct hci_request *req)
519 {
520 	struct hci_dev *hdev = req->hdev;
521 	struct hci_cp_write_le_host_supported cp;
522 
523 	/* LE-only devices do not support explicit enablement */
524 	if (!lmp_bredr_capable(hdev))
525 		return;
526 
527 	memset(&cp, 0, sizeof(cp));
528 
529 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
530 		cp.le = 0x01;
531 		cp.simul = 0x00;
532 	}
533 
534 	if (cp.le != lmp_host_le_capable(hdev))
535 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
536 			    &cp);
537 }
538 
539 static void hci_set_event_mask_page_2(struct hci_request *req)
540 {
541 	struct hci_dev *hdev = req->hdev;
542 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
543 	bool changed = false;
544 
545 	/* If Connectionless Slave Broadcast master role is supported,
546 	 * enable all necessary events for it.
547 	 */
548 	if (lmp_csb_master_capable(hdev)) {
549 		events[1] |= 0x40;	/* Triggered Clock Capture */
550 		events[1] |= 0x80;	/* Synchronization Train Complete */
551 		events[2] |= 0x10;	/* Slave Page Response Timeout */
552 		events[2] |= 0x20;	/* CSB Channel Map Change */
553 		changed = true;
554 	}
555 
556 	/* If Connectionless Slave Broadcast slave role is supported,
557 	 * enable all necessary events for it.
558 	 */
559 	if (lmp_csb_slave_capable(hdev)) {
560 		events[2] |= 0x01;	/* Synchronization Train Received */
561 		events[2] |= 0x02;	/* CSB Receive */
562 		events[2] |= 0x04;	/* CSB Timeout */
563 		events[2] |= 0x08;	/* Truncated Page Complete */
564 		changed = true;
565 	}
566 
567 	/* Enable Authenticated Payload Timeout Expired event if supported */
568 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
569 		events[2] |= 0x80;
570 		changed = true;
571 	}
572 
573 	/* Some Broadcom based controllers indicate support for Set Event
574 	 * Mask Page 2 command, but then actually do not support it. Since
575 	 * the default value is all bits set to zero, the command is only
576 	 * required if the event mask has to be changed. In case no change
577 	 * to the event mask is needed, skip this command.
578 	 */
579 	if (changed)
580 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
581 			    sizeof(events), events);
582 }
583 
584 static int hci_init3_req(struct hci_request *req, unsigned long opt)
585 {
586 	struct hci_dev *hdev = req->hdev;
587 	u8 p;
588 
589 	hci_setup_event_mask(req);
590 
591 	if (hdev->commands[6] & 0x20 &&
592 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
593 		struct hci_cp_read_stored_link_key cp;
594 
595 		bacpy(&cp.bdaddr, BDADDR_ANY);
596 		cp.read_all = 0x01;
597 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
598 	}
599 
600 	if (hdev->commands[5] & 0x10)
601 		hci_setup_link_policy(req);
602 
603 	if (hdev->commands[8] & 0x01)
604 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
605 
606 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
607 	 * support the Read Page Scan Type command. Check support for
608 	 * this command in the bit mask of supported commands.
609 	 */
610 	if (hdev->commands[13] & 0x01)
611 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
612 
613 	if (lmp_le_capable(hdev)) {
614 		u8 events[8];
615 
616 		memset(events, 0, sizeof(events));
617 
618 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
619 			events[0] |= 0x10;	/* LE Long Term Key Request */
620 
621 		/* If controller supports the Connection Parameters Request
622 		 * Link Layer Procedure, enable the corresponding event.
623 		 */
624 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
625 			events[0] |= 0x20;	/* LE Remote Connection
626 						 * Parameter Request
627 						 */
628 
629 		/* If the controller supports the Data Length Extension
630 		 * feature, enable the corresponding event.
631 		 */
632 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
633 			events[0] |= 0x40;	/* LE Data Length Change */
634 
635 		/* If the controller supports Extended Scanner Filter
636 	 * Policies, enable the corresponding event.
637 		 */
638 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
639 			events[1] |= 0x04;	/* LE Direct Advertising
640 						 * Report
641 						 */
642 
643 		/* If the controller supports Channel Selection Algorithm #2
644 		 * feature, enable the corresponding event.
645 		 */
646 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
647 			events[2] |= 0x08;	/* LE Channel Selection
648 						 * Algorithm
649 						 */
650 
651 		/* If the controller supports the LE Set Scan Enable command,
652 		 * enable the corresponding advertising report event.
653 		 */
654 		if (hdev->commands[26] & 0x08)
655 			events[0] |= 0x02;	/* LE Advertising Report */
656 
657 		/* If the controller supports the LE Create Connection
658 		 * command, enable the corresponding event.
659 		 */
660 		if (hdev->commands[26] & 0x10)
661 			events[0] |= 0x01;	/* LE Connection Complete */
662 
663 		/* If the controller supports the LE Connection Update
664 		 * command, enable the corresponding event.
665 		 */
666 		if (hdev->commands[27] & 0x04)
667 			events[0] |= 0x04;	/* LE Connection Update
668 						 * Complete
669 						 */
670 
671 		/* If the controller supports the LE Read Remote Used Features
672 		 * command, enable the corresponding event.
673 		 */
674 		if (hdev->commands[27] & 0x20)
675 			events[0] |= 0x08;	/* LE Read Remote Used
676 						 * Features Complete
677 						 */
678 
679 		/* If the controller supports the LE Read Local P-256
680 		 * Public Key command, enable the corresponding event.
681 		 */
682 		if (hdev->commands[34] & 0x02)
683 			events[0] |= 0x80;	/* LE Read Local P-256
684 						 * Public Key Complete
685 						 */
686 
687 		/* If the controller supports the LE Generate DHKey
688 		 * command, enable the corresponding event.
689 		 */
690 		if (hdev->commands[34] & 0x04)
691 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
692 
693 		/* If the controller supports the LE Set Default PHY or
694 		 * LE Set PHY commands, enable the corresponding event.
695 		 */
696 		if (hdev->commands[35] & (0x20 | 0x40))
697 			events[1] |= 0x08;        /* LE PHY Update Complete */
698 
699 		/* If the controller supports LE Set Extended Scan Parameters
700 		 * and LE Set Extended Scan Enable commands, enable the
701 		 * corresponding event.
702 		 */
703 		if (use_ext_scan(hdev))
704 			events[1] |= 0x10;	/* LE Extended Advertising
705 						 * Report
706 						 */
707 
708 		/* If the controller supports the LE Extended Create Connection
709 		 * command, enable the corresponding event.
710 		 */
711 		if (use_ext_conn(hdev))
712 			events[1] |= 0x02;      /* LE Enhanced Connection
713 						 * Complete
714 						 */
715 
716 		/* If the controller supports the LE Extended Advertising
717 		 * command, enable the corresponding event.
718 		 */
719 		if (ext_adv_capable(hdev))
720 			events[2] |= 0x02;	/* LE Advertising Set
721 						 * Terminated
722 						 */
723 
724 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
725 			    events);
726 
727 		/* Read LE Advertising Channel TX Power */
728 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
729 			/* HCI TS spec forbids mixing of legacy and extended
730 			 * advertising commands wherein READ_ADV_TX_POWER is
731 			 * also included. So do not call it if extended adv
732 			 * is supported otherwise controller will return
733 			 * COMMAND_DISALLOWED for extended commands.
734 			 */
735 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
736 		}
737 
738 		if (hdev->commands[26] & 0x40) {
739 			/* Read LE White List Size */
740 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
741 				    0, NULL);
742 		}
743 
744 		if (hdev->commands[26] & 0x80) {
745 			/* Clear LE White List */
746 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
747 		}
748 
749 		if (hdev->commands[34] & 0x40) {
750 			/* Read LE Resolving List Size */
751 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
752 				    0, NULL);
753 		}
754 
755 		if (hdev->commands[34] & 0x20) {
756 			/* Clear LE Resolving List */
757 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
758 		}
759 
760 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
761 			/* Read LE Maximum Data Length */
762 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
763 
764 			/* Read LE Suggested Default Data Length */
765 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
766 		}
767 
768 		if (ext_adv_capable(hdev)) {
769 			/* Read LE Number of Supported Advertising Sets */
770 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
771 				    0, NULL);
772 		}
773 
774 		hci_set_le_support(req);
775 	}
776 
777 	/* Read features beyond page 1 if available */
778 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
779 		struct hci_cp_read_local_ext_features cp;
780 
781 		cp.page = p;
782 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
783 			    sizeof(cp), &cp);
784 	}
785 
786 	return 0;
787 }
788 
789 static int hci_init4_req(struct hci_request *req, unsigned long opt)
790 {
791 	struct hci_dev *hdev = req->hdev;
792 
793 	/* Some Broadcom based Bluetooth controllers do not support the
794 	 * Delete Stored Link Key command. They are clearly indicating its
795 	 * absence in the bit mask of supported commands.
796 	 *
797 	 * Check the supported commands and send it only if the command is
798 	 * marked as supported. If not supported, assume that the controller
799 	 * does not have actual support for stored link keys which makes this
800 	 * command redundant anyway.
801 	 *
802 	 * Some controllers indicate that they support handling deleting
803 	 * stored link keys, but they don't. The quirk lets a driver
804 	 * just disable this command.
805 	 */
806 	if (hdev->commands[6] & 0x80 &&
807 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
808 		struct hci_cp_delete_stored_link_key cp;
809 
810 		bacpy(&cp.bdaddr, BDADDR_ANY);
811 		cp.delete_all = 0x01;
812 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
813 			    sizeof(cp), &cp);
814 	}
815 
816 	/* Set event mask page 2 if the HCI command for it is supported */
817 	if (hdev->commands[22] & 0x04)
818 		hci_set_event_mask_page_2(req);
819 
820 	/* Read local codec list if the HCI command is supported */
821 	if (hdev->commands[29] & 0x20)
822 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
823 
824 	/* Get MWS transport configuration if the HCI command is supported */
825 	if (hdev->commands[30] & 0x08)
826 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
827 
828 	/* Check for Synchronization Train support */
829 	if (lmp_sync_train_capable(hdev))
830 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
831 
832 	/* Enable Secure Connections if supported and configured */
833 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
834 	    bredr_sc_enabled(hdev)) {
835 		u8 support = 0x01;
836 
837 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
838 			    sizeof(support), &support);
839 	}
840 
841 	/* Set Suggested Default Data Length to maximum if supported */
842 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
843 		struct hci_cp_le_write_def_data_len cp;
844 
845 		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
846 		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
847 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
848 	}
849 
850 	/* Set Default PHY parameters if command is supported */
851 	if (hdev->commands[35] & 0x20) {
852 		struct hci_cp_le_set_default_phy cp;
853 
854 		cp.all_phys = 0x00;
855 		cp.tx_phys = hdev->le_tx_def_phys;
856 		cp.rx_phys = hdev->le_rx_def_phys;
857 
858 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
859 	}
860 
861 	return 0;
862 }
863 
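/* Controller bring-up runs in up to four synchronous stages: init1
 * (reset plus basic capability reads), init2 (transport specific
 * setup, SSP and EIR), init3 (event masks, link policy and LE feature
 * reads) and init4 (optional features such as Secure Connections and
 * default PHY parameters). AMP controllers only run init1 and init2.
 */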
864 static int __hci_init(struct hci_dev *hdev)
865 {
866 	int err;
867 
868 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
869 	if (err < 0)
870 		return err;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SETUP))
873 		hci_debugfs_create_basic(hdev);
874 
875 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
876 	if (err < 0)
877 		return err;
878 
879 	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
880 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
881 	 * first two stages of init.
882 	 */
883 	if (hdev->dev_type != HCI_PRIMARY)
884 		return 0;
885 
886 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
887 	if (err < 0)
888 		return err;
889 
890 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
891 	if (err < 0)
892 		return err;
893 
894 	/* This function is only called when the controller is actually in
895 	 * configured state. When the controller is marked as unconfigured,
896 	 * this initialization procedure is not run.
897 	 *
898 	 * It means that it is possible that a controller runs through its
899 	 * setup phase and then discovers missing settings. If that is the
900 	 * case, then this function will not be called. It then will only
901 	 * be called during the config phase.
902 	 *
903 	 * So only when in setup phase or config phase, create the debugfs
904 	 * entries and register the SMP channels.
905 	 */
906 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
907 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
908 		return 0;
909 
910 	hci_debugfs_create_common(hdev);
911 
912 	if (lmp_bredr_capable(hdev))
913 		hci_debugfs_create_bredr(hdev);
914 
915 	if (lmp_le_capable(hdev))
916 		hci_debugfs_create_le(hdev);
917 
918 	return 0;
919 }
920 
921 static int hci_init0_req(struct hci_request *req, unsigned long opt)
922 {
923 	struct hci_dev *hdev = req->hdev;
924 
925 	BT_DBG("%s %ld", hdev->name, opt);
926 
927 	/* Reset */
928 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
929 		hci_reset_req(req, 0);
930 
931 	/* Read Local Version */
932 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
933 
934 	/* Read BD Address */
935 	if (hdev->set_bdaddr)
936 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
937 
938 	return 0;
939 }
940 
941 static int __hci_unconf_init(struct hci_dev *hdev)
942 {
943 	int err;
944 
945 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
946 		return 0;
947 
948 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
949 	if (err < 0)
950 		return err;
951 
952 	if (hci_dev_test_flag(hdev, HCI_SETUP))
953 		hci_debugfs_create_basic(hdev);
954 
955 	return 0;
956 }
957 
958 static int hci_scan_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 scan = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, scan);
963 
964 	/* Inquiry and Page scans */
965 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
966 	return 0;
967 }
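/* For reference, Write Scan Enable takes 0x00 (no scans), 0x01
 * (inquiry scan only), 0x02 (page scan only) or 0x03 (inquiry and
 * page scan enabled).
 */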
968 
969 static int hci_auth_req(struct hci_request *req, unsigned long opt)
970 {
971 	__u8 auth = opt;
972 
973 	BT_DBG("%s %x", req->hdev->name, auth);
974 
975 	/* Authentication */
976 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
977 	return 0;
978 }
979 
980 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
981 {
982 	__u8 encrypt = opt;
983 
984 	BT_DBG("%s %x", req->hdev->name, encrypt);
985 
986 	/* Encryption */
987 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
988 	return 0;
989 }
990 
991 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
992 {
993 	__le16 policy = cpu_to_le16(opt);
994 
995 	BT_DBG("%s %x", req->hdev->name, policy);
996 
997 	/* Default link policy */
998 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
999 	return 0;
1000 }
1001 
1002 /* Get HCI device by index.
1003  * Device is held on return. */
1004 struct hci_dev *hci_dev_get(int index)
1005 {
1006 	struct hci_dev *hdev = NULL, *d;
1007 
1008 	BT_DBG("%d", index);
1009 
1010 	if (index < 0)
1011 		return NULL;
1012 
1013 	read_lock(&hci_dev_list_lock);
1014 	list_for_each_entry(d, &hci_dev_list, list) {
1015 		if (d->id == index) {
1016 			hdev = hci_dev_hold(d);
1017 			break;
1018 		}
1019 	}
1020 	read_unlock(&hci_dev_list_lock);
1021 	return hdev;
1022 }
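/* A reference obtained from hci_dev_get() must be balanced with
 * hci_dev_put(), as the ioctl helpers below do:
 *
 *	hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */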
1023 
1024 /* ---- Inquiry support ---- */
1025 
1026 bool hci_discovery_active(struct hci_dev *hdev)
1027 {
1028 	struct discovery_state *discov = &hdev->discovery;
1029 
1030 	switch (discov->state) {
1031 	case DISCOVERY_FINDING:
1032 	case DISCOVERY_RESOLVING:
1033 		return true;
1034 
1035 	default:
1036 		return false;
1037 	}
1038 }
1039 
1040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1041 {
1042 	int old_state = hdev->discovery.state;
1043 
1044 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1045 
1046 	if (old_state == state)
1047 		return;
1048 
1049 	hdev->discovery.state = state;
1050 
1051 	switch (state) {
1052 	case DISCOVERY_STOPPED:
1053 		hci_update_background_scan(hdev);
1054 
1055 		if (old_state != DISCOVERY_STARTING)
1056 			mgmt_discovering(hdev, 0);
1057 		break;
1058 	case DISCOVERY_STARTING:
1059 		break;
1060 	case DISCOVERY_FINDING:
1061 		mgmt_discovering(hdev, 1);
1062 		break;
1063 	case DISCOVERY_RESOLVING:
1064 		break;
1065 	case DISCOVERY_STOPPING:
1066 		break;
1067 	}
1068 }
1069 
1070 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1071 {
1072 	struct discovery_state *cache = &hdev->discovery;
1073 	struct inquiry_entry *p, *n;
1074 
1075 	list_for_each_entry_safe(p, n, &cache->all, all) {
1076 		list_del(&p->all);
1077 		kfree(p);
1078 	}
1079 
1080 	INIT_LIST_HEAD(&cache->unknown);
1081 	INIT_LIST_HEAD(&cache->resolve);
1082 }
1083 
1084 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1085 					       bdaddr_t *bdaddr)
1086 {
1087 	struct discovery_state *cache = &hdev->discovery;
1088 	struct inquiry_entry *e;
1089 
1090 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1091 
1092 	list_for_each_entry(e, &cache->all, all) {
1093 		if (!bacmp(&e->data.bdaddr, bdaddr))
1094 			return e;
1095 	}
1096 
1097 	return NULL;
1098 }
1099 
1100 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1101 						       bdaddr_t *bdaddr)
1102 {
1103 	struct discovery_state *cache = &hdev->discovery;
1104 	struct inquiry_entry *e;
1105 
1106 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1107 
1108 	list_for_each_entry(e, &cache->unknown, list) {
1109 		if (!bacmp(&e->data.bdaddr, bdaddr))
1110 			return e;
1111 	}
1112 
1113 	return NULL;
1114 }
1115 
1116 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1117 						       bdaddr_t *bdaddr,
1118 						       int state)
1119 {
1120 	struct discovery_state *cache = &hdev->discovery;
1121 	struct inquiry_entry *e;
1122 
1123 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1124 
1125 	list_for_each_entry(e, &cache->resolve, list) {
1126 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1127 			return e;
1128 		if (!bacmp(&e->data.bdaddr, bdaddr))
1129 			return e;
1130 	}
1131 
1132 	return NULL;
1133 }
1134 
1135 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1136 				      struct inquiry_entry *ie)
1137 {
1138 	struct discovery_state *cache = &hdev->discovery;
1139 	struct list_head *pos = &cache->resolve;
1140 	struct inquiry_entry *p;
1141 
1142 	list_del(&ie->list);
1143 
1144 	list_for_each_entry(p, &cache->resolve, list) {
1145 		if (p->name_state != NAME_PENDING &&
1146 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1147 			break;
1148 		pos = &p->list;
1149 	}
1150 
1151 	list_add(&ie->list, pos);
1152 }
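/* This keeps the resolve list sorted by RSSI magnitude, strongest
 * signal first; entries with a name request already pending are
 * skipped in the comparison and keep their place. Remote name
 * requests are thus issued for the closest devices first.
 */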
1153 
1154 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1155 			     bool name_known)
1156 {
1157 	struct discovery_state *cache = &hdev->discovery;
1158 	struct inquiry_entry *ie;
1159 	u32 flags = 0;
1160 
1161 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1162 
1163 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1164 
1165 	if (!data->ssp_mode)
1166 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1167 
1168 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1169 	if (ie) {
1170 		if (!ie->data.ssp_mode)
1171 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1172 
1173 		if (ie->name_state == NAME_NEEDED &&
1174 		    data->rssi != ie->data.rssi) {
1175 			ie->data.rssi = data->rssi;
1176 			hci_inquiry_cache_update_resolve(hdev, ie);
1177 		}
1178 
1179 		goto update;
1180 	}
1181 
1182 	/* Entry not in the cache. Add new one. */
1183 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1184 	if (!ie) {
1185 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1186 		goto done;
1187 	}
1188 
1189 	list_add(&ie->all, &cache->all);
1190 
1191 	if (name_known) {
1192 		ie->name_state = NAME_KNOWN;
1193 	} else {
1194 		ie->name_state = NAME_NOT_KNOWN;
1195 		list_add(&ie->list, &cache->unknown);
1196 	}
1197 
1198 update:
1199 	if (name_known && ie->name_state != NAME_KNOWN &&
1200 	    ie->name_state != NAME_PENDING) {
1201 		ie->name_state = NAME_KNOWN;
1202 		list_del(&ie->list);
1203 	}
1204 
1205 	memcpy(&ie->data, data, sizeof(*data));
1206 	ie->timestamp = jiffies;
1207 	cache->timestamp = jiffies;
1208 
1209 	if (ie->name_state == NAME_NOT_KNOWN)
1210 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 
1212 done:
1213 	return flags;
1214 }
1215 
1216 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1217 {
1218 	struct discovery_state *cache = &hdev->discovery;
1219 	struct inquiry_info *info = (struct inquiry_info *) buf;
1220 	struct inquiry_entry *e;
1221 	int copied = 0;
1222 
1223 	list_for_each_entry(e, &cache->all, all) {
1224 		struct inquiry_data *data = &e->data;
1225 
1226 		if (copied >= num)
1227 			break;
1228 
1229 		bacpy(&info->bdaddr, &data->bdaddr);
1230 		info->pscan_rep_mode	= data->pscan_rep_mode;
1231 		info->pscan_period_mode	= data->pscan_period_mode;
1232 		info->pscan_mode	= data->pscan_mode;
1233 		memcpy(info->dev_class, data->dev_class, 3);
1234 		info->clock_offset	= data->clock_offset;
1235 
1236 		info++;
1237 		copied++;
1238 	}
1239 
1240 	BT_DBG("cache %p, copied %d", cache, copied);
1241 	return copied;
1242 }
1243 
1244 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1245 {
1246 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1247 	struct hci_dev *hdev = req->hdev;
1248 	struct hci_cp_inquiry cp;
1249 
1250 	BT_DBG("%s", hdev->name);
1251 
1252 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1253 		return 0;
1254 
1255 	/* Start Inquiry */
1256 	memcpy(&cp.lap, &ir->lap, 3);
1257 	cp.length  = ir->length;
1258 	cp.num_rsp = ir->num_rsp;
1259 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1260 
1261 	return 0;
1262 }
1263 
1264 int hci_inquiry(void __user *arg)
1265 {
1266 	__u8 __user *ptr = arg;
1267 	struct hci_inquiry_req ir;
1268 	struct hci_dev *hdev;
1269 	int err = 0, do_inquiry = 0, max_rsp;
1270 	long timeo;
1271 	__u8 *buf;
1272 
1273 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1274 		return -EFAULT;
1275 
1276 	hdev = hci_dev_get(ir.dev_id);
1277 	if (!hdev)
1278 		return -ENODEV;
1279 
1280 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1281 		err = -EBUSY;
1282 		goto done;
1283 	}
1284 
1285 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1286 		err = -EOPNOTSUPP;
1287 		goto done;
1288 	}
1289 
1290 	if (hdev->dev_type != HCI_PRIMARY) {
1291 		err = -EOPNOTSUPP;
1292 		goto done;
1293 	}
1294 
1295 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1296 		err = -EOPNOTSUPP;
1297 		goto done;
1298 	}
1299 
1300 	hci_dev_lock(hdev);
1301 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1302 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1303 		hci_inquiry_cache_flush(hdev);
1304 		do_inquiry = 1;
1305 	}
1306 	hci_dev_unlock(hdev);
1307 
1308 	timeo = ir.length * msecs_to_jiffies(2000);
1309 
1310 	if (do_inquiry) {
1311 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1312 				   timeo, NULL);
1313 		if (err < 0)
1314 			goto done;
1315 
1316 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1317 		 * cleared). If it is interrupted by a signal, return -EINTR.
1318 		 */
1319 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1320 				TASK_INTERRUPTIBLE))
1321 			return -EINTR;
1322 	}
1323 
1324 	/* For an unlimited number of responses (num_rsp == 0), use a
1325 	 * buffer with 255 entries
1326 	 */
1327 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1328 
1329 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1330 	 * copy it to the user space.
1331 	 */
1332 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1333 	if (!buf) {
1334 		err = -ENOMEM;
1335 		goto done;
1336 	}
1337 
1338 	hci_dev_lock(hdev);
1339 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1340 	hci_dev_unlock(hdev);
1341 
1342 	BT_DBG("num_rsp %d", ir.num_rsp);
1343 
1344 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1345 		ptr += sizeof(ir);
1346 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1347 				 ir.num_rsp))
1348 			err = -EFAULT;
1349 	} else
1350 		err = -EFAULT;
1351 
1352 	kfree(buf);
1353 
1354 done:
1355 	hci_dev_put(hdev);
1356 	return err;
1357 }
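/* A minimal userspace sketch of driving this ioctl (assumptions: dd is
 * a raw HCI socket and the adapter is hci0; the buffer layout is the
 * request header immediately followed by num_rsp inquiry_info records,
 * matching the copy_to_user() calls above):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.lap[0]  = 0x33;	// GIAC 0x9e8b33, LSB first
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;	// inquiry length in 1.28 s units
 *	buf.ir.num_rsp = 8;
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */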
1358 
1359 /**
1360  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1361  *				       (BD_ADDR) for a HCI device from
1362  *				       a firmware node property.
1363  * @hdev:	The HCI device
1364  *
1365  * Search the firmware node for 'local-bd-address'.
1366  *
1367  * All-zero BD addresses are rejected, because those could be properties
1368  * that exist in the firmware tables, but were not updated by the firmware. For
1369  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1370  */
1371 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1372 {
1373 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1374 	bdaddr_t ba;
1375 	int ret;
1376 
1377 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1378 					    (u8 *)&ba, sizeof(ba));
1379 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1380 		return;
1381 
1382 	bacpy(&hdev->public_addr, &ba);
1383 }
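/* An illustrative firmware node carrying the property (the devicetree
 * binding stores the address with the least significant byte first):
 *
 *	bluetooth {
 *		local-bd-address = [ 66 55 44 33 22 11 ];
 *	};
 *
 * which yields the public address 11:22:33:44:55:66.
 */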
1384 
1385 static int hci_dev_do_open(struct hci_dev *hdev)
1386 {
1387 	int ret = 0;
1388 
1389 	BT_DBG("%s %p", hdev->name, hdev);
1390 
1391 	hci_req_sync_lock(hdev);
1392 
1393 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1394 		ret = -ENODEV;
1395 		goto done;
1396 	}
1397 
1398 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1399 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1400 		/* Check for rfkill but allow the HCI setup stage to
1401 		 * proceed (which in itself doesn't cause any RF activity).
1402 		 */
1403 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1404 			ret = -ERFKILL;
1405 			goto done;
1406 		}
1407 
1408 		/* Check for valid public address or a configured static
1409 		 * random address, but let the HCI setup proceed to
1410 		 * be able to determine if there is a public address
1411 		 * or not.
1412 		 *
1413 		 * In case of user channel usage, it is not important
1414 		 * if a public address or static random address is
1415 		 * available.
1416 		 *
1417 		 * This check is only valid for BR/EDR controllers
1418 		 * since AMP controllers do not have an address.
1419 		 */
1420 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1421 		    hdev->dev_type == HCI_PRIMARY &&
1422 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1423 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1424 			ret = -EADDRNOTAVAIL;
1425 			goto done;
1426 		}
1427 	}
1428 
1429 	if (test_bit(HCI_UP, &hdev->flags)) {
1430 		ret = -EALREADY;
1431 		goto done;
1432 	}
1433 
1434 	if (hdev->open(hdev)) {
1435 		ret = -EIO;
1436 		goto done;
1437 	}
1438 
1439 	set_bit(HCI_RUNNING, &hdev->flags);
1440 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1441 
1442 	atomic_set(&hdev->cmd_cnt, 1);
1443 	set_bit(HCI_INIT, &hdev->flags);
1444 
1445 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1446 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1447 		bool invalid_bdaddr;
1448 
1449 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1450 
1451 		if (hdev->setup)
1452 			ret = hdev->setup(hdev);
1453 
1454 		/* The transport driver can set the quirk to mark the
1455 		 * BD_ADDR invalid before creating the HCI device or in
1456 		 * its setup callback.
1457 		 */
1458 		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1459 					  &hdev->quirks);
1460 
1461 		if (ret)
1462 			goto setup_failed;
1463 
1464 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1465 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1466 				hci_dev_get_bd_addr_from_property(hdev);
1467 
1468 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1469 			    hdev->set_bdaddr) {
1470 				ret = hdev->set_bdaddr(hdev,
1471 						       &hdev->public_addr);
1472 
1473 				/* If setting of the BD_ADDR from the device
1474 				 * property succeeds, then treat the address
1475 				 * as valid even if the invalid BD_ADDR
1476 				 * quirk indicates otherwise.
1477 				 */
1478 				if (!ret)
1479 					invalid_bdaddr = false;
1480 			}
1481 		}
1482 
1483 setup_failed:
1484 		/* The transport driver can set these quirks before
1485 		 * creating the HCI device or in its setup callback.
1486 		 *
1487 		 * For the invalid BD_ADDR quirk it is possible that
1488 		 * it becomes a valid address if the bootloader does
1489 		 * provide it (see above).
1490 		 *
1491 		 * In case any of them is set, the controller has to
1492 		 * start up as unconfigured.
1493 		 */
1494 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1495 		    invalid_bdaddr)
1496 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1497 
1498 		/* For an unconfigured controller it is required to
1499 		 * read at least the version information provided by
1500 		 * the Read Local Version Information command.
1501 		 *
1502 		 * If the set_bdaddr driver callback is provided, then
1503 		 * also the original Bluetooth public device address
1504 		 * will be read using the Read BD Address command.
1505 		 */
1506 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1507 			ret = __hci_unconf_init(hdev);
1508 	}
1509 
1510 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1511 		/* If public address change is configured, ensure that
1512 		 * the address gets programmed. If the driver does not
1513 		 * support changing the public address, fail the power
1514 		 * on procedure.
1515 		 */
1516 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1517 		    hdev->set_bdaddr)
1518 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1519 		else
1520 			ret = -EADDRNOTAVAIL;
1521 	}
1522 
1523 	if (!ret) {
1524 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1525 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1526 			ret = __hci_init(hdev);
1527 			if (!ret && hdev->post_init)
1528 				ret = hdev->post_init(hdev);
1529 		}
1530 	}
1531 
1532 	/* If the HCI Reset command is clearing all diagnostic settings,
1533 	 * then they need to be reprogrammed after the init procedure
1534 	 * completed.
1535 	 */
1536 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1537 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1538 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1539 		ret = hdev->set_diag(hdev, true);
1540 
1541 	clear_bit(HCI_INIT, &hdev->flags);
1542 
1543 	if (!ret) {
1544 		hci_dev_hold(hdev);
1545 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1546 		hci_adv_instances_set_rpa_expired(hdev, true);
1547 		set_bit(HCI_UP, &hdev->flags);
1548 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1549 		hci_leds_update_powered(hdev, true);
1550 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1551 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1552 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1553 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1554 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1555 		    hdev->dev_type == HCI_PRIMARY) {
1556 			ret = __hci_req_hci_power_on(hdev);
1557 			mgmt_power_on(hdev, ret);
1558 		}
1559 	} else {
1560 		/* Init failed, cleanup */
1561 		flush_work(&hdev->tx_work);
1562 		flush_work(&hdev->cmd_work);
1563 		flush_work(&hdev->rx_work);
1564 
1565 		skb_queue_purge(&hdev->cmd_q);
1566 		skb_queue_purge(&hdev->rx_q);
1567 
1568 		if (hdev->flush)
1569 			hdev->flush(hdev);
1570 
1571 		if (hdev->sent_cmd) {
1572 			kfree_skb(hdev->sent_cmd);
1573 			hdev->sent_cmd = NULL;
1574 		}
1575 
1576 		clear_bit(HCI_RUNNING, &hdev->flags);
1577 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1578 
1579 		hdev->close(hdev);
1580 		hdev->flags &= BIT(HCI_RAW);
1581 	}
1582 
1583 done:
1584 	hci_req_sync_unlock(hdev);
1585 	return ret;
1586 }
1587 
1588 /* ---- HCI ioctl helpers ---- */
1589 
1590 int hci_dev_open(__u16 dev)
1591 {
1592 	struct hci_dev *hdev;
1593 	int err;
1594 
1595 	hdev = hci_dev_get(dev);
1596 	if (!hdev)
1597 		return -ENODEV;
1598 
1599 	/* Devices that are marked as unconfigured can only be powered
1600 	 * up as user channel. Trying to bring them up as normal devices
1601 	 * will result in a failure. Only user channel operation is
1602 	 * possible.
1603 	 *
1604 	 * When this function is called for a user channel, the flag
1605 	 * HCI_USER_CHANNEL will be set first before attempting to
1606 	 * open the device.
1607 	 */
1608 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1609 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1610 		err = -EOPNOTSUPP;
1611 		goto done;
1612 	}
1613 
1614 	/* We need to ensure that no other power on/off work is pending
1615 	 * before proceeding to call hci_dev_do_open. This is
1616 	 * particularly important if the setup procedure has not yet
1617 	 * completed.
1618 	 */
1619 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1620 		cancel_delayed_work(&hdev->power_off);
1621 
1622 	/* After this call it is guaranteed that the setup procedure
1623 	 * has finished. This means that error conditions like RFKILL
1624 	 * or no valid public or static random address apply.
1625 	 */
1626 	flush_workqueue(hdev->req_workqueue);
1627 
1628 	/* For controllers not using the management interface and that
1629 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1630 	 * so that pairing works for them. Once the management interface
1631 	 * is in use this bit will be cleared again and userspace has
1632 	 * to explicitly enable it.
1633 	 */
1634 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1635 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1636 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1637 
1638 	err = hci_dev_do_open(hdev);
1639 
1640 done:
1641 	hci_dev_put(hdev);
1642 	return err;
1643 }
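/* This is the path behind the legacy HCIDEVUP ioctl, e.g. what
 * "hciconfig hci0 up" performs; a minimal userspace sketch (0 selects
 * hci0, assumed):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 */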
1644 
1645 /* This function requires the caller holds hdev->lock */
1646 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1647 {
1648 	struct hci_conn_params *p;
1649 
1650 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1651 		if (p->conn) {
1652 			hci_conn_drop(p->conn);
1653 			hci_conn_put(p->conn);
1654 			p->conn = NULL;
1655 		}
1656 		list_del_init(&p->action);
1657 	}
1658 
1659 	BT_DBG("All LE pending actions cleared");
1660 }
1661 
1662 int hci_dev_do_close(struct hci_dev *hdev)
1663 {
1664 	bool auto_off;
1665 
1666 	BT_DBG("%s %p", hdev->name, hdev);
1667 
1668 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1669 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1670 	    test_bit(HCI_UP, &hdev->flags)) {
1671 		/* Execute vendor specific shutdown routine */
1672 		if (hdev->shutdown)
1673 			hdev->shutdown(hdev);
1674 	}
1675 
1676 	cancel_delayed_work(&hdev->power_off);
1677 
1678 	hci_request_cancel_all(hdev);
1679 	hci_req_sync_lock(hdev);
1680 
1681 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1682 		cancel_delayed_work_sync(&hdev->cmd_timer);
1683 		hci_req_sync_unlock(hdev);
1684 		return 0;
1685 	}
1686 
1687 	hci_leds_update_powered(hdev, false);
1688 
1689 	/* Flush RX and TX works */
1690 	flush_work(&hdev->tx_work);
1691 	flush_work(&hdev->rx_work);
1692 
1693 	if (hdev->discov_timeout > 0) {
1694 		hdev->discov_timeout = 0;
1695 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1696 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1697 	}
1698 
1699 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1700 		cancel_delayed_work(&hdev->service_cache);
1701 
1702 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1703 		struct adv_info *adv_instance;
1704 
1705 		cancel_delayed_work_sync(&hdev->rpa_expired);
1706 
1707 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1708 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1709 	}
1710 
1711 	/* Avoid potential lockdep warnings from the *_flush() calls by
1712 	 * ensuring the workqueue is empty up front.
1713 	 */
1714 	drain_workqueue(hdev->workqueue);
1715 
1716 	hci_dev_lock(hdev);
1717 
1718 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1719 
1720 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1721 
1722 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1723 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1724 	    hci_dev_test_flag(hdev, HCI_MGMT))
1725 		__mgmt_power_off(hdev);
1726 
1727 	hci_inquiry_cache_flush(hdev);
1728 	hci_pend_le_actions_clear(hdev);
1729 	hci_conn_hash_flush(hdev);
1730 	hci_dev_unlock(hdev);
1731 
1732 	smp_unregister(hdev);
1733 
1734 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1735 
1736 	if (hdev->flush)
1737 		hdev->flush(hdev);
1738 
1739 	/* Reset device */
1740 	skb_queue_purge(&hdev->cmd_q);
1741 	atomic_set(&hdev->cmd_cnt, 1);
1742 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1743 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1744 		set_bit(HCI_INIT, &hdev->flags);
1745 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1746 		clear_bit(HCI_INIT, &hdev->flags);
1747 	}
1748 
1749 	/* Flush cmd work */
1750 	flush_work(&hdev->cmd_work);
1751 
1752 	/* Drop queues */
1753 	skb_queue_purge(&hdev->rx_q);
1754 	skb_queue_purge(&hdev->cmd_q);
1755 	skb_queue_purge(&hdev->raw_q);
1756 
1757 	/* Drop last sent command */
1758 	if (hdev->sent_cmd) {
1759 		cancel_delayed_work_sync(&hdev->cmd_timer);
1760 		kfree_skb(hdev->sent_cmd);
1761 		hdev->sent_cmd = NULL;
1762 	}
1763 
1764 	clear_bit(HCI_RUNNING, &hdev->flags);
1765 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1766 
1767 	/* After this point our queues are empty
1768 	 * and no tasks are scheduled. */
1769 	hdev->close(hdev);
1770 
1771 	/* Clear flags */
1772 	hdev->flags &= BIT(HCI_RAW);
1773 	hci_dev_clear_volatile_flags(hdev);
1774 
1775 	/* Controller radio is available but is currently powered down */
1776 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1777 
1778 	memset(hdev->eir, 0, sizeof(hdev->eir));
1779 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1780 	bacpy(&hdev->random_addr, BDADDR_ANY);
1781 
1782 	hci_req_sync_unlock(hdev);
1783 
1784 	hci_dev_put(hdev);
1785 	return 0;
1786 }
1787 
1788 int hci_dev_close(__u16 dev)
1789 {
1790 	struct hci_dev *hdev;
1791 	int err;
1792 
1793 	hdev = hci_dev_get(dev);
1794 	if (!hdev)
1795 		return -ENODEV;
1796 
1797 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1798 		err = -EBUSY;
1799 		goto done;
1800 	}
1801 
1802 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1803 		cancel_delayed_work(&hdev->power_off);
1804 
1805 	err = hci_dev_do_close(hdev);
1806 
1807 done:
1808 	hci_dev_put(hdev);
1809 	return err;
1810 }
1811 
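/* Reset the controller while keeping the device up: drop the RX and
 * command queues, flush the inquiry cache and connection hash, then
 * issue a synchronous HCI_Reset.
 */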
1812 static int hci_dev_do_reset(struct hci_dev *hdev)
1813 {
1814 	int ret;
1815 
1816 	BT_DBG("%s %p", hdev->name, hdev);
1817 
1818 	hci_req_sync_lock(hdev);
1819 
1820 	/* Drop queues */
1821 	skb_queue_purge(&hdev->rx_q);
1822 	skb_queue_purge(&hdev->cmd_q);
1823 
1824 	/* Avoid potential lockdep warnings from the *_flush() calls by
1825 	 * ensuring the workqueue is empty up front.
1826 	 */
1827 	drain_workqueue(hdev->workqueue);
1828 
1829 	hci_dev_lock(hdev);
1830 	hci_inquiry_cache_flush(hdev);
1831 	hci_conn_hash_flush(hdev);
1832 	hci_dev_unlock(hdev);
1833 
1834 	if (hdev->flush)
1835 		hdev->flush(hdev);
1836 
1837 	atomic_set(&hdev->cmd_cnt, 1);
1838 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1839 
1840 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1841 
1842 	hci_req_sync_unlock(hdev);
1843 	return ret;
1844 }
1845 
1846 int hci_dev_reset(__u16 dev)
1847 {
1848 	struct hci_dev *hdev;
1849 	int err;
1850 
1851 	hdev = hci_dev_get(dev);
1852 	if (!hdev)
1853 		return -ENODEV;
1854 
1855 	if (!test_bit(HCI_UP, &hdev->flags)) {
1856 		err = -ENETDOWN;
1857 		goto done;
1858 	}
1859 
1860 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1861 		err = -EBUSY;
1862 		goto done;
1863 	}
1864 
1865 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1866 		err = -EOPNOTSUPP;
1867 		goto done;
1868 	}
1869 
1870 	err = hci_dev_do_reset(hdev);
1871 
1872 done:
1873 	hci_dev_put(hdev);
1874 	return err;
1875 }
1876 
1877 int hci_dev_reset_stat(__u16 dev)
1878 {
1879 	struct hci_dev *hdev;
1880 	int ret = 0;
1881 
1882 	hdev = hci_dev_get(dev);
1883 	if (!hdev)
1884 		return -ENODEV;
1885 
1886 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1887 		ret = -EBUSY;
1888 		goto done;
1889 	}
1890 
1891 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1892 		ret = -EOPNOTSUPP;
1893 		goto done;
1894 	}
1895 
1896 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1897 
1898 done:
1899 	hci_dev_put(hdev);
1900 	return ret;
1901 }
1902 
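/* Mirror a raw write of the Scan_Enable parameter into the
 * HCI_CONNECTABLE (SCAN_PAGE) and HCI_DISCOVERABLE (SCAN_INQUIRY)
 * flags so that mgmt stays consistent with ioctl-initiated changes.
 */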
1903 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1904 {
1905 	bool conn_changed, discov_changed;
1906 
1907 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1908 
1909 	if ((scan & SCAN_PAGE))
1910 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1911 							  HCI_CONNECTABLE);
1912 	else
1913 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1914 							   HCI_CONNECTABLE);
1915 
1916 	if ((scan & SCAN_INQUIRY)) {
1917 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1918 							    HCI_DISCOVERABLE);
1919 	} else {
1920 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1921 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1922 							     HCI_DISCOVERABLE);
1923 	}
1924 
1925 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1926 		return;
1927 
1928 	if (conn_changed || discov_changed) {
1929 		/* In case this was disabled through mgmt */
1930 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1931 
1932 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1933 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1934 
1935 		mgmt_new_settings(hdev);
1936 	}
1937 }
1938 
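/* Backend for the legacy HCISET* ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE and friends) on a primary BR/EDR controller.
 */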
1939 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1940 {
1941 	struct hci_dev *hdev;
1942 	struct hci_dev_req dr;
1943 	int err = 0;
1944 
1945 	if (copy_from_user(&dr, arg, sizeof(dr)))
1946 		return -EFAULT;
1947 
1948 	hdev = hci_dev_get(dr.dev_id);
1949 	if (!hdev)
1950 		return -ENODEV;
1951 
1952 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1953 		err = -EBUSY;
1954 		goto done;
1955 	}
1956 
1957 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1958 		err = -EOPNOTSUPP;
1959 		goto done;
1960 	}
1961 
1962 	if (hdev->dev_type != HCI_PRIMARY) {
1963 		err = -EOPNOTSUPP;
1964 		goto done;
1965 	}
1966 
1967 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1968 		err = -EOPNOTSUPP;
1969 		goto done;
1970 	}
1971 
1972 	switch (cmd) {
1973 	case HCISETAUTH:
1974 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1975 				   HCI_INIT_TIMEOUT, NULL);
1976 		break;
1977 
1978 	case HCISETENCRYPT:
1979 		if (!lmp_encrypt_capable(hdev)) {
1980 			err = -EOPNOTSUPP;
1981 			break;
1982 		}
1983 
1984 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1985 			/* Auth must be enabled first */
1986 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1987 					   HCI_INIT_TIMEOUT, NULL);
1988 			if (err)
1989 				break;
1990 		}
1991 
1992 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1993 				   HCI_INIT_TIMEOUT, NULL);
1994 		break;
1995 
1996 	case HCISETSCAN:
1997 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1998 				   HCI_INIT_TIMEOUT, NULL);
1999 
2000 		/* Ensure that the connectable and discoverable states
2001 		 * get correctly modified as this was a non-mgmt change.
2002 		 */
2003 		if (!err)
2004 			hci_update_scan_state(hdev, dr.dev_opt);
2005 		break;
2006 
2007 	case HCISETLINKPOL:
2008 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2009 				   HCI_INIT_TIMEOUT, NULL);
2010 		break;
2011 
2012 	case HCISETLINKMODE:
2013 		hdev->link_mode = ((__u16) dr.dev_opt) &
2014 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
2015 		break;
2016 
2017 	case HCISETPTYPE:
2018 		if (hdev->pkt_type == (__u16) dr.dev_opt)
2019 			break;
2020 
2021 		hdev->pkt_type = (__u16) dr.dev_opt;
2022 		mgmt_phy_configuration_changed(hdev, NULL);
2023 		break;
2024 
2025 	case HCISETACLMTU:
2026 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2027 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2028 		break;
2029 
2030 	case HCISETSCOMTU:
2031 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2032 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2033 		break;
2034 
2035 	default:
2036 		err = -EINVAL;
2037 		break;
2038 	}
2039 
2040 done:
2041 	hci_dev_put(hdev);
2042 	return err;
2043 }
2044 
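/* Backend for the HCIGETDEVLIST ioctl: copy the id and flags of up to
 * dev_num registered controllers to userspace.
 */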
2045 int hci_get_dev_list(void __user *arg)
2046 {
2047 	struct hci_dev *hdev;
2048 	struct hci_dev_list_req *dl;
2049 	struct hci_dev_req *dr;
2050 	int n = 0, size, err;
2051 	__u16 dev_num;
2052 
2053 	if (get_user(dev_num, (__u16 __user *) arg))
2054 		return -EFAULT;
2055 
2056 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2057 		return -EINVAL;
2058 
2059 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2060 
2061 	dl = kzalloc(size, GFP_KERNEL);
2062 	if (!dl)
2063 		return -ENOMEM;
2064 
2065 	dr = dl->dev_req;
2066 
2067 	read_lock(&hci_dev_list_lock);
2068 	list_for_each_entry(hdev, &hci_dev_list, list) {
2069 		unsigned long flags = hdev->flags;
2070 
2071 		/* When the auto-off is configured it means the transport
2072 		 * is running, but in that case still indicate that the
2073 		 * device is actually down.
2074 		 */
2075 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2076 			flags &= ~BIT(HCI_UP);
2077 
2078 		(dr + n)->dev_id  = hdev->id;
2079 		(dr + n)->dev_opt = flags;
2080 
2081 		if (++n >= dev_num)
2082 			break;
2083 	}
2084 	read_unlock(&hci_dev_list_lock);
2085 
2086 	dl->dev_num = n;
2087 	size = sizeof(*dl) + n * sizeof(*dr);
2088 
2089 	err = copy_to_user(arg, dl, size);
2090 	kfree(dl);
2091 
2092 	return err ? -EFAULT : 0;
2093 }
2094 
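/* Backend for the HCIGETDEVINFO ioctl: fill a struct hci_dev_info
 * snapshot for a single controller.
 */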
2095 int hci_get_dev_info(void __user *arg)
2096 {
2097 	struct hci_dev *hdev;
2098 	struct hci_dev_info di;
2099 	unsigned long flags;
2100 	int err = 0;
2101 
2102 	if (copy_from_user(&di, arg, sizeof(di)))
2103 		return -EFAULT;
2104 
2105 	hdev = hci_dev_get(di.dev_id);
2106 	if (!hdev)
2107 		return -ENODEV;
2108 
2109 	/* When the auto-off is configured it means the transport
2110 	 * is running, but in that case still indicate that the
2111 	 * device is actually down.
2112 	 */
2113 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2114 		flags = hdev->flags & ~BIT(HCI_UP);
2115 	else
2116 		flags = hdev->flags;
2117 
2118 	strcpy(di.name, hdev->name);
2119 	di.bdaddr   = hdev->bdaddr;
2120 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2121 	di.flags    = flags;
2122 	di.pkt_type = hdev->pkt_type;
2123 	if (lmp_bredr_capable(hdev)) {
2124 		di.acl_mtu  = hdev->acl_mtu;
2125 		di.acl_pkts = hdev->acl_pkts;
2126 		di.sco_mtu  = hdev->sco_mtu;
2127 		di.sco_pkts = hdev->sco_pkts;
2128 	} else {
2129 		di.acl_mtu  = hdev->le_mtu;
2130 		di.acl_pkts = hdev->le_pkts;
2131 		di.sco_mtu  = 0;
2132 		di.sco_pkts = 0;
2133 	}
2134 	di.link_policy = hdev->link_policy;
2135 	di.link_mode   = hdev->link_mode;
2136 
2137 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2138 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2139 
2140 	if (copy_to_user(arg, &di, sizeof(di)))
2141 		err = -EFAULT;
2142 
2143 	hci_dev_put(hdev);
2144 
2145 	return err;
2146 }
2147 
2148 /* ---- Interface to HCI drivers ---- */
2149 
2150 static int hci_rfkill_set_block(void *data, bool blocked)
2151 {
2152 	struct hci_dev *hdev = data;
2153 
2154 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2155 
2156 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2157 		return -EBUSY;
2158 
2159 	if (blocked) {
2160 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2161 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2162 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2163 			hci_dev_do_close(hdev);
2164 	} else {
2165 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2166 	}
2167 
2168 	return 0;
2169 }
2170 
2171 static const struct rfkill_ops hci_rfkill_ops = {
2172 	.set_block = hci_rfkill_set_block,
2173 };
2174 
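/* Worker for hdev->power_on: open the device and, depending on the
 * setup/config state, emit the matching mgmt Index Added event or
 * schedule an automatic power-off.
 */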
2175 static void hci_power_on(struct work_struct *work)
2176 {
2177 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2178 	int err;
2179 
2180 	BT_DBG("%s", hdev->name);
2181 
2182 	if (test_bit(HCI_UP, &hdev->flags) &&
2183 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2184 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2185 		cancel_delayed_work(&hdev->power_off);
2186 		hci_req_sync_lock(hdev);
2187 		err = __hci_req_hci_power_on(hdev);
2188 		hci_req_sync_unlock(hdev);
2189 		mgmt_power_on(hdev, err);
2190 		return;
2191 	}
2192 
2193 	err = hci_dev_do_open(hdev);
2194 	if (err < 0) {
2195 		hci_dev_lock(hdev);
2196 		mgmt_set_powered_failed(hdev, err);
2197 		hci_dev_unlock(hdev);
2198 		return;
2199 	}
2200 
2201 	/* During the HCI setup phase, a few error conditions are
2202 	 * ignored and they need to be checked now. If they are still
2203 	 * valid, it is important to turn the device back off.
2204 	 */
2205 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2206 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2207 	    (hdev->dev_type == HCI_PRIMARY &&
2208 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2209 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2210 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2211 		hci_dev_do_close(hdev);
2212 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2213 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2214 				   HCI_AUTO_OFF_TIMEOUT);
2215 	}
2216 
2217 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2218 		/* For unconfigured devices, set the HCI_RAW flag
2219 		 * so that userspace can easily identify them.
2220 		 */
2221 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2222 			set_bit(HCI_RAW, &hdev->flags);
2223 
2224 		/* For fully configured devices, this will send
2225 		 * the Index Added event. For unconfigured devices,
2226 		 * it will send an Unconfigured Index Added event.
2227 		 *
2228 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2229 		 * and no event will be sent.
2230 		 */
2231 		mgmt_index_added(hdev);
2232 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2233 		/* When the controller is now configured, then it
2234 		 * is important to clear the HCI_RAW flag.
2235 		 */
2236 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2237 			clear_bit(HCI_RAW, &hdev->flags);
2238 
2239 		/* Powering on the controller with HCI_CONFIG set only
2240 		 * happens with the transition from unconfigured to
2241 		 * configured. This will send the Index Added event.
2242 		 */
2243 		mgmt_index_added(hdev);
2244 	}
2245 }
2246 
2247 static void hci_power_off(struct work_struct *work)
2248 {
2249 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2250 					    power_off.work);
2251 
2252 	BT_DBG("%s", hdev->name);
2253 
2254 	hci_dev_do_close(hdev);
2255 }
2256 
2257 static void hci_error_reset(struct work_struct *work)
2258 {
2259 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2260 
2261 	BT_DBG("%s", hdev->name);
2262 
2263 	if (hdev->hw_error)
2264 		hdev->hw_error(hdev, hdev->hw_error_code);
2265 	else
2266 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2267 
2268 	if (hci_dev_do_close(hdev))
2269 		return;
2270 
2271 	hci_dev_do_open(hdev);
2272 }
2273 
2274 void hci_uuids_clear(struct hci_dev *hdev)
2275 {
2276 	struct bt_uuid *uuid, *tmp;
2277 
2278 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2279 		list_del(&uuid->list);
2280 		kfree(uuid);
2281 	}
2282 }
2283 
2284 void hci_link_keys_clear(struct hci_dev *hdev)
2285 {
2286 	struct link_key *key;
2287 
2288 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2289 		list_del_rcu(&key->list);
2290 		kfree_rcu(key, rcu);
2291 	}
2292 }
2293 
2294 void hci_smp_ltks_clear(struct hci_dev *hdev)
2295 {
2296 	struct smp_ltk *k;
2297 
2298 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2299 		list_del_rcu(&k->list);
2300 		kfree_rcu(k, rcu);
2301 	}
2302 }
2303 
2304 void hci_smp_irks_clear(struct hci_dev *hdev)
2305 {
2306 	struct smp_irk *k;
2307 
2308 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2309 		list_del_rcu(&k->list);
2310 		kfree_rcu(k, rcu);
2311 	}
2312 }
2313 
2314 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2315 {
2316 	struct link_key *k;
2317 
2318 	rcu_read_lock();
2319 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2320 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2321 			rcu_read_unlock();
2322 			return k;
2323 		}
2324 	}
2325 	rcu_read_unlock();
2326 
2327 	return NULL;
2328 }
2329 
2330 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2331 			       u8 key_type, u8 old_key_type)
2332 {
2333 	/* Legacy key */
2334 	if (key_type < 0x03)
2335 		return true;
2336 
2337 	/* Debug keys are insecure so don't store them persistently */
2338 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2339 		return false;
2340 
2341 	/* Changed combination key and there's no previous one */
2342 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2343 		return false;
2344 
2345 	/* Security mode 3 case */
2346 	if (!conn)
2347 		return true;
2348 
2349 	/* BR/EDR key derived using SC from an LE link */
2350 	if (conn->type == LE_LINK)
2351 		return true;
2352 
2353 	/* Neither the local nor the remote side had no-bonding as a requirement */
2354 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2355 		return true;
2356 
2357 	/* Local side had dedicated bonding as requirement */
2358 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2359 		return true;
2360 
2361 	/* Remote side had dedicated bonding as requirement */
2362 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2363 		return true;
2364 
2365 	/* If none of the above criteria match, then don't store the key
2366 	 * persistently */
2367 	return false;
2368 }
2369 
2370 static u8 ltk_role(u8 type)
2371 {
2372 	if (type == SMP_LTK)
2373 		return HCI_ROLE_MASTER;
2374 
2375 	return HCI_ROLE_SLAVE;
2376 }
2377 
2378 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2379 			     u8 addr_type, u8 role)
2380 {
2381 	struct smp_ltk *k;
2382 
2383 	rcu_read_lock();
2384 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2385 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2386 			continue;
2387 
2388 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2389 			rcu_read_unlock();
2390 			return k;
2391 		}
2392 	}
2393 	rcu_read_unlock();
2394 
2395 	return NULL;
2396 }
2397 
2398 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2399 {
2400 	struct smp_irk *irk;
2401 
2402 	rcu_read_lock();
2403 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2404 		if (!bacmp(&irk->rpa, rpa)) {
2405 			rcu_read_unlock();
2406 			return irk;
2407 		}
2408 	}
2409 
2410 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2411 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2412 			bacpy(&irk->rpa, rpa);
2413 			rcu_read_unlock();
2414 			return irk;
2415 		}
2416 	}
2417 	rcu_read_unlock();
2418 
2419 	return NULL;
2420 }
2421 
2422 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2423 				     u8 addr_type)
2424 {
2425 	struct smp_irk *irk;
2426 
2427 	/* Identity Address must be public or static random */
2428 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2429 		return NULL;
2430 
2431 	rcu_read_lock();
2432 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2433 		if (addr_type == irk->addr_type &&
2434 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2435 			rcu_read_unlock();
2436 			return irk;
2437 		}
2438 	}
2439 	rcu_read_unlock();
2440 
2441 	return NULL;
2442 }
2443 
2444 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2445 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2446 				  u8 pin_len, bool *persistent)
2447 {
2448 	struct link_key *key, *old_key;
2449 	u8 old_key_type;
2450 
2451 	old_key = hci_find_link_key(hdev, bdaddr);
2452 	if (old_key) {
2453 		old_key_type = old_key->type;
2454 		key = old_key;
2455 	} else {
2456 		old_key_type = conn ? conn->key_type : 0xff;
2457 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2458 		if (!key)
2459 			return NULL;
2460 		list_add_rcu(&key->list, &hdev->link_keys);
2461 	}
2462 
2463 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2464 
2465 	/* Some buggy controller combinations generate a changed
2466 	 * combination key for legacy pairing even when there's no
2467 	 * previous key */
2468 	if (type == HCI_LK_CHANGED_COMBINATION &&
2469 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2470 		type = HCI_LK_COMBINATION;
2471 		if (conn)
2472 			conn->key_type = type;
2473 	}
2474 
2475 	bacpy(&key->bdaddr, bdaddr);
2476 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2477 	key->pin_len = pin_len;
2478 
2479 	if (type == HCI_LK_CHANGED_COMBINATION)
2480 		key->type = old_key_type;
2481 	else
2482 		key->type = type;
2483 
2484 	if (persistent)
2485 		*persistent = hci_persistent_key(hdev, conn, type,
2486 						 old_key_type);
2487 
2488 	return key;
2489 }
2490 
2491 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2492 			    u8 addr_type, u8 type, u8 authenticated,
2493 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2494 {
2495 	struct smp_ltk *key, *old_key;
2496 	u8 role = ltk_role(type);
2497 
2498 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2499 	if (old_key)
2500 		key = old_key;
2501 	else {
2502 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2503 		if (!key)
2504 			return NULL;
2505 		list_add_rcu(&key->list, &hdev->long_term_keys);
2506 	}
2507 
2508 	bacpy(&key->bdaddr, bdaddr);
2509 	key->bdaddr_type = addr_type;
2510 	memcpy(key->val, tk, sizeof(key->val));
2511 	key->authenticated = authenticated;
2512 	key->ediv = ediv;
2513 	key->rand = rand;
2514 	key->enc_size = enc_size;
2515 	key->type = type;
2516 
2517 	return key;
2518 }
2519 
2520 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2521 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2522 {
2523 	struct smp_irk *irk;
2524 
2525 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2526 	if (!irk) {
2527 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2528 		if (!irk)
2529 			return NULL;
2530 
2531 		bacpy(&irk->bdaddr, bdaddr);
2532 		irk->addr_type = addr_type;
2533 
2534 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2535 	}
2536 
2537 	memcpy(irk->val, val, 16);
2538 	bacpy(&irk->rpa, rpa);
2539 
2540 	return irk;
2541 }
2542 
2543 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2544 {
2545 	struct link_key *key;
2546 
2547 	key = hci_find_link_key(hdev, bdaddr);
2548 	if (!key)
2549 		return -ENOENT;
2550 
2551 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2552 
2553 	list_del_rcu(&key->list);
2554 	kfree_rcu(key, rcu);
2555 
2556 	return 0;
2557 }
2558 
2559 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2560 {
2561 	struct smp_ltk *k;
2562 	int removed = 0;
2563 
2564 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2565 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2566 			continue;
2567 
2568 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2569 
2570 		list_del_rcu(&k->list);
2571 		kfree_rcu(k, rcu);
2572 		removed++;
2573 	}
2574 
2575 	return removed ? 0 : -ENOENT;
2576 }
2577 
2578 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2579 {
2580 	struct smp_irk *k;
2581 
2582 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2583 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2584 			continue;
2585 
2586 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2587 
2588 		list_del_rcu(&k->list);
2589 		kfree_rcu(k, rcu);
2590 	}
2591 }
2592 
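/* Check whether a bond exists for the given address: a stored link key
 * for BR/EDR, or an LTK for LE (resolving the identity address via an
 * IRK first, if one is known).
 */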
2593 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2594 {
2595 	struct smp_ltk *k;
2596 	struct smp_irk *irk;
2597 	u8 addr_type;
2598 
2599 	if (type == BDADDR_BREDR) {
2600 		if (hci_find_link_key(hdev, bdaddr))
2601 			return true;
2602 		return false;
2603 	}
2604 
2605 	/* Convert to HCI addr type which struct smp_ltk uses */
2606 	if (type == BDADDR_LE_PUBLIC)
2607 		addr_type = ADDR_LE_DEV_PUBLIC;
2608 	else
2609 		addr_type = ADDR_LE_DEV_RANDOM;
2610 
2611 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2612 	if (irk) {
2613 		bdaddr = &irk->bdaddr;
2614 		addr_type = irk->addr_type;
2615 	}
2616 
2617 	rcu_read_lock();
2618 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2619 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2620 			rcu_read_unlock();
2621 			return true;
2622 		}
2623 	}
2624 	rcu_read_unlock();
2625 
2626 	return false;
2627 }
2628 
2629 /* HCI command timer function */
2630 static void hci_cmd_timeout(struct work_struct *work)
2631 {
2632 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2633 					    cmd_timer.work);
2634 
2635 	if (hdev->sent_cmd) {
2636 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2637 		u16 opcode = __le16_to_cpu(sent->opcode);
2638 
2639 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2640 	} else {
2641 		bt_dev_err(hdev, "command tx timeout");
2642 	}
2643 
2644 	if (hdev->cmd_timeout)
2645 		hdev->cmd_timeout(hdev);
2646 
2647 	atomic_set(&hdev->cmd_cnt, 1);
2648 	queue_work(hdev->workqueue, &hdev->cmd_work);
2649 }
2650 
2651 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2652 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2653 {
2654 	struct oob_data *data;
2655 
2656 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2657 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2658 			continue;
2659 		if (data->bdaddr_type != bdaddr_type)
2660 			continue;
2661 		return data;
2662 	}
2663 
2664 	return NULL;
2665 }
2666 
2667 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2668 			       u8 bdaddr_type)
2669 {
2670 	struct oob_data *data;
2671 
2672 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2673 	if (!data)
2674 		return -ENOENT;
2675 
2676 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2677 
2678 	list_del(&data->list);
2679 	kfree(data);
2680 
2681 	return 0;
2682 }
2683 
2684 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2685 {
2686 	struct oob_data *data, *n;
2687 
2688 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2689 		list_del(&data->list);
2690 		kfree(data);
2691 	}
2692 }
2693 
2694 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2695 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2696 			    u8 *hash256, u8 *rand256)
2697 {
2698 	struct oob_data *data;
2699 
2700 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2701 	if (!data) {
2702 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2703 		if (!data)
2704 			return -ENOMEM;
2705 
2706 		bacpy(&data->bdaddr, bdaddr);
2707 		data->bdaddr_type = bdaddr_type;
2708 		list_add(&data->list, &hdev->remote_oob_data);
2709 	}
2710 
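	/* The present field is a bitmask: 0x01 = P-192 values only,
	 * 0x02 = P-256 values only, 0x03 = both sets of OOB data.
	 */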
2711 	if (hash192 && rand192) {
2712 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2713 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2714 		if (hash256 && rand256)
2715 			data->present = 0x03;
2716 	} else {
2717 		memset(data->hash192, 0, sizeof(data->hash192));
2718 		memset(data->rand192, 0, sizeof(data->rand192));
2719 		if (hash256 && rand256)
2720 			data->present = 0x02;
2721 		else
2722 			data->present = 0x00;
2723 	}
2724 
2725 	if (hash256 && rand256) {
2726 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2727 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2728 	} else {
2729 		memset(data->hash256, 0, sizeof(data->hash256));
2730 		memset(data->rand256, 0, sizeof(data->rand256));
2731 		if (hash192 && rand192)
2732 			data->present = 0x01;
2733 	}
2734 
2735 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2736 
2737 	return 0;
2738 }
2739 
2740 /* This function requires the caller holds hdev->lock */
2741 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2742 {
2743 	struct adv_info *adv_instance;
2744 
2745 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2746 		if (adv_instance->instance == instance)
2747 			return adv_instance;
2748 	}
2749 
2750 	return NULL;
2751 }
2752 
2753 /* This function requires the caller holds hdev->lock */
2754 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2755 {
2756 	struct adv_info *cur_instance;
2757 
2758 	cur_instance = hci_find_adv_instance(hdev, instance);
2759 	if (!cur_instance)
2760 		return NULL;
2761 
2762 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2763 					    struct adv_info, list))
2764 		return list_first_entry(&hdev->adv_instances,
2765 						 struct adv_info, list);
2766 	else
2767 		return list_next_entry(cur_instance, list);
2768 }
2769 
2770 /* This function requires the caller holds hdev->lock */
2771 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2772 {
2773 	struct adv_info *adv_instance;
2774 
2775 	adv_instance = hci_find_adv_instance(hdev, instance);
2776 	if (!adv_instance)
2777 		return -ENOENT;
2778 
2779 	BT_DBG("%s removing instance %d", hdev->name, instance);
2780 
2781 	if (hdev->cur_adv_instance == instance) {
2782 		if (hdev->adv_instance_timeout) {
2783 			cancel_delayed_work(&hdev->adv_instance_expire);
2784 			hdev->adv_instance_timeout = 0;
2785 		}
2786 		hdev->cur_adv_instance = 0x00;
2787 	}
2788 
2789 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2790 
2791 	list_del(&adv_instance->list);
2792 	kfree(adv_instance);
2793 
2794 	hdev->adv_instance_cnt--;
2795 
2796 	return 0;
2797 }
2798 
2799 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2800 {
2801 	struct adv_info *adv_instance, *n;
2802 
2803 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2804 		adv_instance->rpa_expired = rpa_expired;
2805 }
2806 
2807 /* This function requires the caller holds hdev->lock */
2808 void hci_adv_instances_clear(struct hci_dev *hdev)
2809 {
2810 	struct adv_info *adv_instance, *n;
2811 
2812 	if (hdev->adv_instance_timeout) {
2813 		cancel_delayed_work(&hdev->adv_instance_expire);
2814 		hdev->adv_instance_timeout = 0;
2815 	}
2816 
2817 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2818 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2819 		list_del(&adv_instance->list);
2820 		kfree(adv_instance);
2821 	}
2822 
2823 	hdev->adv_instance_cnt = 0;
2824 	hdev->cur_adv_instance = 0x00;
2825 }
2826 
2827 static void adv_instance_rpa_expired(struct work_struct *work)
2828 {
2829 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2830 						     rpa_expired_cb.work);
2831 
2832 	BT_DBG("");
2833 
2834 	adv_instance->rpa_expired = true;
2835 }
2836 
2837 /* This function requires the caller holds hdev->lock */
2838 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2839 			 u16 adv_data_len, u8 *adv_data,
2840 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2841 			 u16 timeout, u16 duration)
2842 {
2843 	struct adv_info *adv_instance;
2844 
2845 	adv_instance = hci_find_adv_instance(hdev, instance);
2846 	if (adv_instance) {
2847 		memset(adv_instance->adv_data, 0,
2848 		       sizeof(adv_instance->adv_data));
2849 		memset(adv_instance->scan_rsp_data, 0,
2850 		       sizeof(adv_instance->scan_rsp_data));
2851 	} else {
2852 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2853 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2854 			return -EOVERFLOW;
2855 
2856 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2857 		if (!adv_instance)
2858 			return -ENOMEM;
2859 
2860 		adv_instance->pending = true;
2861 		adv_instance->instance = instance;
2862 		list_add(&adv_instance->list, &hdev->adv_instances);
2863 		hdev->adv_instance_cnt++;
2864 	}
2865 
2866 	adv_instance->flags = flags;
2867 	adv_instance->adv_data_len = adv_data_len;
2868 	adv_instance->scan_rsp_len = scan_rsp_len;
2869 
2870 	if (adv_data_len)
2871 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2872 
2873 	if (scan_rsp_len)
2874 		memcpy(adv_instance->scan_rsp_data,
2875 		       scan_rsp_data, scan_rsp_len);
2876 
2877 	adv_instance->timeout = timeout;
2878 	adv_instance->remaining_time = timeout;
2879 
2880 	if (duration == 0)
2881 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2882 	else
2883 		adv_instance->duration = duration;
2884 
2885 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2886 
2887 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2888 			  adv_instance_rpa_expired);
2889 
2890 	BT_DBG("%s for instance %d", hdev->name, instance);
2891 
2892 	return 0;
2893 }
2894 
2895 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2896 					 bdaddr_t *bdaddr, u8 type)
2897 {
2898 	struct bdaddr_list *b;
2899 
2900 	list_for_each_entry(b, bdaddr_list, list) {
2901 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2902 			return b;
2903 	}
2904 
2905 	return NULL;
2906 }
2907 
2908 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2909 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2910 				u8 type)
2911 {
2912 	struct bdaddr_list_with_irk *b;
2913 
2914 	list_for_each_entry(b, bdaddr_list, list) {
2915 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2916 			return b;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
2922 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2923 {
2924 	struct bdaddr_list *b, *n;
2925 
2926 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2927 		list_del(&b->list);
2928 		kfree(b);
2929 	}
2930 }
2931 
2932 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2933 {
2934 	struct bdaddr_list *entry;
2935 
2936 	if (!bacmp(bdaddr, BDADDR_ANY))
2937 		return -EBADF;
2938 
2939 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2940 		return -EEXIST;
2941 
2942 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2943 	if (!entry)
2944 		return -ENOMEM;
2945 
2946 	bacpy(&entry->bdaddr, bdaddr);
2947 	entry->bdaddr_type = type;
2948 
2949 	list_add(&entry->list, list);
2950 
2951 	return 0;
2952 }
2953 
2954 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2955 					u8 type, u8 *peer_irk, u8 *local_irk)
2956 {
2957 	struct bdaddr_list_with_irk *entry;
2958 
2959 	if (!bacmp(bdaddr, BDADDR_ANY))
2960 		return -EBADF;
2961 
2962 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2963 		return -EEXIST;
2964 
2965 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2966 	if (!entry)
2967 		return -ENOMEM;
2968 
2969 	bacpy(&entry->bdaddr, bdaddr);
2970 	entry->bdaddr_type = type;
2971 
2972 	if (peer_irk)
2973 		memcpy(entry->peer_irk, peer_irk, 16);
2974 
2975 	if (local_irk)
2976 		memcpy(entry->local_irk, local_irk, 16);
2977 
2978 	list_add(&entry->list, list);
2979 
2980 	return 0;
2981 }
2982 
2983 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2984 {
2985 	struct bdaddr_list *entry;
2986 
2987 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2988 		hci_bdaddr_list_clear(list);
2989 		return 0;
2990 	}
2991 
2992 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2993 	if (!entry)
2994 		return -ENOENT;
2995 
2996 	list_del(&entry->list);
2997 	kfree(entry);
2998 
2999 	return 0;
3000 }
3001 
3002 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3003 							u8 type)
3004 {
3005 	struct bdaddr_list_with_irk *entry;
3006 
3007 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3008 		hci_bdaddr_list_clear(list);
3009 		return 0;
3010 	}
3011 
3012 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3013 	if (!entry)
3014 		return -ENOENT;
3015 
3016 	list_del(&entry->list);
3017 	kfree(entry);
3018 
3019 	return 0;
3020 }
3021 
3022 /* This function requires the caller holds hdev->lock */
3023 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3024 					       bdaddr_t *addr, u8 addr_type)
3025 {
3026 	struct hci_conn_params *params;
3027 
3028 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3029 		if (bacmp(&params->addr, addr) == 0 &&
3030 		    params->addr_type == addr_type) {
3031 			return params;
3032 		}
3033 	}
3034 
3035 	return NULL;
3036 }
3037 
3038 /* This function requires the caller holds hdev->lock */
3039 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3040 						  bdaddr_t *addr, u8 addr_type)
3041 {
3042 	struct hci_conn_params *param;
3043 
3044 	list_for_each_entry(param, list, action) {
3045 		if (bacmp(&param->addr, addr) == 0 &&
3046 		    param->addr_type == addr_type)
3047 			return param;
3048 	}
3049 
3050 	return NULL;
3051 }
3052 
3053 /* This function requires the caller holds hdev->lock */
3054 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3055 					    bdaddr_t *addr, u8 addr_type)
3056 {
3057 	struct hci_conn_params *params;
3058 
3059 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3060 	if (params)
3061 		return params;
3062 
3063 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3064 	if (!params) {
3065 		bt_dev_err(hdev, "out of memory");
3066 		return NULL;
3067 	}
3068 
3069 	bacpy(&params->addr, addr);
3070 	params->addr_type = addr_type;
3071 
3072 	list_add(&params->list, &hdev->le_conn_params);
3073 	INIT_LIST_HEAD(&params->action);
3074 
3075 	params->conn_min_interval = hdev->le_conn_min_interval;
3076 	params->conn_max_interval = hdev->le_conn_max_interval;
3077 	params->conn_latency = hdev->le_conn_latency;
3078 	params->supervision_timeout = hdev->le_supv_timeout;
3079 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3080 
3081 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3082 
3083 	return params;
3084 }
3085 
3086 static void hci_conn_params_free(struct hci_conn_params *params)
3087 {
3088 	if (params->conn) {
3089 		hci_conn_drop(params->conn);
3090 		hci_conn_put(params->conn);
3091 	}
3092 
3093 	list_del(&params->action);
3094 	list_del(&params->list);
3095 	kfree(params);
3096 }
3097 
3098 /* This function requires the caller holds hdev->lock */
3099 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3100 {
3101 	struct hci_conn_params *params;
3102 
3103 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3104 	if (!params)
3105 		return;
3106 
3107 	hci_conn_params_free(params);
3108 
3109 	hci_update_background_scan(hdev);
3110 
3111 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3112 }
3113 
3114 /* This function requires the caller holds hdev->lock */
3115 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3116 {
3117 	struct hci_conn_params *params, *tmp;
3118 
3119 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3120 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3121 			continue;
3122 
3123 		/* If trying to establish a one-time connection to a disabled
3124 		 * device, leave the params, but mark them as just once.
3125 		 */
3126 		if (params->explicit_connect) {
3127 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3128 			continue;
3129 		}
3130 
3131 		list_del(&params->list);
3132 		kfree(params);
3133 	}
3134 
3135 	BT_DBG("All LE disabled connection parameters were removed");
3136 }
3137 
3138 /* This function requires the caller holds hdev->lock */
3139 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3140 {
3141 	struct hci_conn_params *params, *tmp;
3142 
3143 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3144 		hci_conn_params_free(params);
3145 
3146 	BT_DBG("All LE connection parameters were removed");
3147 }
3148 
3149 /* Copy the Identity Address of the controller.
3150  *
3151  * If the controller has a public BD_ADDR, then by default use that one.
3152  * If this is an LE-only controller without a public address, default to
3153  * the static random address.
3154  *
3155  * For debugging purposes it is possible to force controllers with a
3156  * public address to use the static random address instead.
3157  *
3158  * In case BR/EDR has been disabled on a dual-mode controller and
3159  * userspace has configured a static address, then that address
3160  * becomes the identity address instead of the public BR/EDR address.
3161  */
3162 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3163 			       u8 *bdaddr_type)
3164 {
3165 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3166 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3167 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3168 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3169 		bacpy(bdaddr, &hdev->static_addr);
3170 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3171 	} else {
3172 		bacpy(bdaddr, &hdev->bdaddr);
3173 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3174 	}
3175 }
3176 
3177 /* Alloc HCI device */
3178 struct hci_dev *hci_alloc_dev(void)
3179 {
3180 	struct hci_dev *hdev;
3181 
3182 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3183 	if (!hdev)
3184 		return NULL;
3185 
3186 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3187 	hdev->esco_type = (ESCO_HV1);
3188 	hdev->link_mode = (HCI_LM_ACCEPT);
3189 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3190 	hdev->io_capability = 0x03;	/* No Input No Output */
3191 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3192 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3193 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3194 	hdev->adv_instance_cnt = 0;
3195 	hdev->cur_adv_instance = 0x00;
3196 	hdev->adv_instance_timeout = 0;
3197 
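	/* Sniff intervals use standard HCI units of 0.625 ms baseband
	 * slots, so 800 = 500 ms and 80 = 50 ms.
	 */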
3198 	hdev->sniff_max_interval = 800;
3199 	hdev->sniff_min_interval = 80;
3200 
3201 	hdev->le_adv_channel_map = 0x07;
3202 	hdev->le_adv_min_interval = 0x0800;
3203 	hdev->le_adv_max_interval = 0x0800;
3204 	hdev->le_scan_interval = 0x0060;
3205 	hdev->le_scan_window = 0x0030;
3206 	hdev->le_conn_min_interval = 0x0018;
3207 	hdev->le_conn_max_interval = 0x0028;
3208 	hdev->le_conn_latency = 0x0000;
3209 	hdev->le_supv_timeout = 0x002a;
3210 	hdev->le_def_tx_len = 0x001b;
3211 	hdev->le_def_tx_time = 0x0148;
3212 	hdev->le_max_tx_len = 0x001b;
3213 	hdev->le_max_tx_time = 0x0148;
3214 	hdev->le_max_rx_len = 0x001b;
3215 	hdev->le_max_rx_time = 0x0148;
3216 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3217 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3218 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3219 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3220 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3221 
3222 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3223 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3224 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3225 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3226 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3227 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3228 
3229 	mutex_init(&hdev->lock);
3230 	mutex_init(&hdev->req_lock);
3231 
3232 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3233 	INIT_LIST_HEAD(&hdev->blacklist);
3234 	INIT_LIST_HEAD(&hdev->whitelist);
3235 	INIT_LIST_HEAD(&hdev->uuids);
3236 	INIT_LIST_HEAD(&hdev->link_keys);
3237 	INIT_LIST_HEAD(&hdev->long_term_keys);
3238 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3239 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3240 	INIT_LIST_HEAD(&hdev->le_white_list);
3241 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3242 	INIT_LIST_HEAD(&hdev->le_conn_params);
3243 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3244 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3245 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3246 	INIT_LIST_HEAD(&hdev->adv_instances);
3247 
3248 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3249 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3250 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3251 	INIT_WORK(&hdev->power_on, hci_power_on);
3252 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3253 
3254 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3255 
3256 	skb_queue_head_init(&hdev->rx_q);
3257 	skb_queue_head_init(&hdev->cmd_q);
3258 	skb_queue_head_init(&hdev->raw_q);
3259 
3260 	init_waitqueue_head(&hdev->req_wait_q);
3261 
3262 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3263 
3264 	hci_request_setup(hdev);
3265 
3266 	hci_init_sysfs(hdev);
3267 	discovery_init(hdev);
3268 
3269 	return hdev;
3270 }
3271 EXPORT_SYMBOL(hci_alloc_dev);
3272 
3273 /* Free HCI device */
3274 void hci_free_dev(struct hci_dev *hdev)
3275 {
3276 	/* will free via device release */
3277 	put_device(&hdev->dev);
3278 }
3279 EXPORT_SYMBOL(hci_free_dev);
3280 
3281 /* Register HCI device */
3282 int hci_register_dev(struct hci_dev *hdev)
3283 {
3284 	int id, error;
3285 
3286 	if (!hdev->open || !hdev->close || !hdev->send)
3287 		return -EINVAL;
3288 
3289 	/* Do not allow HCI_AMP devices to register at index 0,
3290 	 * so the index can be used as the AMP controller ID.
3291 	 */
3292 	switch (hdev->dev_type) {
3293 	case HCI_PRIMARY:
3294 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3295 		break;
3296 	case HCI_AMP:
3297 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3298 		break;
3299 	default:
3300 		return -EINVAL;
3301 	}
3302 
3303 	if (id < 0)
3304 		return id;
3305 
3306 	sprintf(hdev->name, "hci%d", id);
3307 	hdev->id = id;
3308 
3309 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3310 
3311 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3312 	if (!hdev->workqueue) {
3313 		error = -ENOMEM;
3314 		goto err;
3315 	}
3316 
3317 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3318 						      hdev->name);
3319 	if (!hdev->req_workqueue) {
3320 		destroy_workqueue(hdev->workqueue);
3321 		error = -ENOMEM;
3322 		goto err;
3323 	}
3324 
3325 	if (!IS_ERR_OR_NULL(bt_debugfs))
3326 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3327 
3328 	dev_set_name(&hdev->dev, "%s", hdev->name);
3329 
3330 	error = device_add(&hdev->dev);
3331 	if (error < 0)
3332 		goto err_wqueue;
3333 
3334 	hci_leds_init(hdev);
3335 
3336 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3337 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3338 				    hdev);
3339 	if (hdev->rfkill) {
3340 		if (rfkill_register(hdev->rfkill) < 0) {
3341 			rfkill_destroy(hdev->rfkill);
3342 			hdev->rfkill = NULL;
3343 		}
3344 	}
3345 
3346 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3347 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3348 
3349 	hci_dev_set_flag(hdev, HCI_SETUP);
3350 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3351 
3352 	if (hdev->dev_type == HCI_PRIMARY) {
3353 		/* Assume BR/EDR support until proven otherwise (such as
3354 		 * through reading supported features during init).
3355 		 */
3356 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3357 	}
3358 
3359 	write_lock(&hci_dev_list_lock);
3360 	list_add(&hdev->list, &hci_dev_list);
3361 	write_unlock(&hci_dev_list_lock);
3362 
3363 	/* Devices that are marked for raw-only usage are unconfigured
3364 	 * and should not be included in normal operation.
3365 	 */
3366 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3367 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3368 
3369 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3370 	hci_dev_hold(hdev);
3371 
3372 	queue_work(hdev->req_workqueue, &hdev->power_on);
3373 
3374 	return id;
3375 
3376 err_wqueue:
3377 	destroy_workqueue(hdev->workqueue);
3378 	destroy_workqueue(hdev->req_workqueue);
3379 err:
3380 	ida_simple_remove(&hci_index_ida, hdev->id);
3381 
3382 	return error;
3383 }
3384 EXPORT_SYMBOL(hci_register_dev);
3385 
3386 /* Unregister HCI device */
3387 void hci_unregister_dev(struct hci_dev *hdev)
3388 {
3389 	int id;
3390 
3391 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3392 
3393 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3394 
3395 	id = hdev->id;
3396 
3397 	write_lock(&hci_dev_list_lock);
3398 	list_del(&hdev->list);
3399 	write_unlock(&hci_dev_list_lock);
3400 
3401 	cancel_work_sync(&hdev->power_on);
3402 
3403 	hci_dev_do_close(hdev);
3404 
3405 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3406 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3407 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3408 		hci_dev_lock(hdev);
3409 		mgmt_index_removed(hdev);
3410 		hci_dev_unlock(hdev);
3411 	}
3412 
3413 	/* mgmt_index_removed should take care of emptying the
3414 	 * pending list */
3415 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3416 
3417 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3418 
3419 	if (hdev->rfkill) {
3420 		rfkill_unregister(hdev->rfkill);
3421 		rfkill_destroy(hdev->rfkill);
3422 	}
3423 
3424 	device_del(&hdev->dev);
3425 
3426 	debugfs_remove_recursive(hdev->debugfs);
3427 	kfree_const(hdev->hw_info);
3428 	kfree_const(hdev->fw_info);
3429 
3430 	destroy_workqueue(hdev->workqueue);
3431 	destroy_workqueue(hdev->req_workqueue);
3432 
3433 	hci_dev_lock(hdev);
3434 	hci_bdaddr_list_clear(&hdev->blacklist);
3435 	hci_bdaddr_list_clear(&hdev->whitelist);
3436 	hci_uuids_clear(hdev);
3437 	hci_link_keys_clear(hdev);
3438 	hci_smp_ltks_clear(hdev);
3439 	hci_smp_irks_clear(hdev);
3440 	hci_remote_oob_data_clear(hdev);
3441 	hci_adv_instances_clear(hdev);
3442 	hci_bdaddr_list_clear(&hdev->le_white_list);
3443 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3444 	hci_conn_params_clear_all(hdev);
3445 	hci_discovery_filter_clear(hdev);
3446 	hci_dev_unlock(hdev);
3447 
3448 	hci_dev_put(hdev);
3449 
3450 	ida_simple_remove(&hci_index_ida, id);
3451 }
3452 EXPORT_SYMBOL(hci_unregister_dev);
3453 
3454 /* Suspend HCI device */
3455 int hci_suspend_dev(struct hci_dev *hdev)
3456 {
3457 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3458 	return 0;
3459 }
3460 EXPORT_SYMBOL(hci_suspend_dev);
3461 
3462 /* Resume HCI device */
3463 int hci_resume_dev(struct hci_dev *hdev)
3464 {
3465 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3466 	return 0;
3467 }
3468 EXPORT_SYMBOL(hci_resume_dev);
3469 
3470 /* Reset HCI device */
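/* Injects a synthetic Hardware Error event (error code 0x00) into the
 * RX path so the core runs its normal error handling for the controller.
 */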
3471 int hci_reset_dev(struct hci_dev *hdev)
3472 {
3473 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3474 	struct sk_buff *skb;
3475 
3476 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3477 	if (!skb)
3478 		return -ENOMEM;
3479 
3480 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3481 	skb_put_data(skb, hw_err, 3);
3482 
3483 	/* Send Hardware Error to upper stack */
3484 	return hci_recv_frame(hdev, skb);
3485 }
3486 EXPORT_SYMBOL(hci_reset_dev);
3487 
3488 /* Receive frame from HCI drivers */
3489 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3490 {
3491 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3492 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3493 		kfree_skb(skb);
3494 		return -ENXIO;
3495 	}
3496 
3497 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3498 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3499 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3500 		kfree_skb(skb);
3501 		return -EINVAL;
3502 	}
3503 
3504 	/* Incoming skb */
3505 	bt_cb(skb)->incoming = 1;
3506 
3507 	/* Time stamp */
3508 	__net_timestamp(skb);
3509 
3510 	skb_queue_tail(&hdev->rx_q, skb);
3511 	queue_work(hdev->workqueue, &hdev->rx_work);
3512 
3513 	return 0;
3514 }
3515 EXPORT_SYMBOL(hci_recv_frame);
3516 
3517 /* Receive diagnostic message from HCI drivers */
3518 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3519 {
3520 	/* Mark as diagnostic packet */
3521 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3522 
3523 	/* Time stamp */
3524 	__net_timestamp(skb);
3525 
3526 	skb_queue_tail(&hdev->rx_q, skb);
3527 	queue_work(hdev->workqueue, &hdev->rx_work);
3528 
3529 	return 0;
3530 }
3531 EXPORT_SYMBOL(hci_recv_diag);
3532 
3533 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3534 {
3535 	va_list vargs;
3536 
3537 	va_start(vargs, fmt);
3538 	kfree_const(hdev->hw_info);
3539 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3540 	va_end(vargs);
3541 }
3542 EXPORT_SYMBOL(hci_set_hw_info);
3543 
3544 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3545 {
3546 	va_list vargs;
3547 
3548 	va_start(vargs, fmt);
3549 	kfree_const(hdev->fw_info);
3550 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3551 	va_end(vargs);
3552 }
3553 EXPORT_SYMBOL(hci_set_fw_info);
3554 
3555 /* ---- Interface to upper protocols ---- */
3556 
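/* Example (illustrative only; names are hypothetical): an upper-layer
 * protocol registers for connection events like this:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */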
3557 int hci_register_cb(struct hci_cb *cb)
3558 {
3559 	BT_DBG("%p name %s", cb, cb->name);
3560 
3561 	mutex_lock(&hci_cb_list_lock);
3562 	list_add_tail(&cb->list, &hci_cb_list);
3563 	mutex_unlock(&hci_cb_list_lock);
3564 
3565 	return 0;
3566 }
3567 EXPORT_SYMBOL(hci_register_cb);
3568 
3569 int hci_unregister_cb(struct hci_cb *cb)
3570 {
3571 	BT_DBG("%p name %s", cb, cb->name);
3572 
3573 	mutex_lock(&hci_cb_list_lock);
3574 	list_del(&cb->list);
3575 	mutex_unlock(&hci_cb_list_lock);
3576 
3577 	return 0;
3578 }
3579 EXPORT_SYMBOL(hci_unregister_cb);
3580 
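/* Hand one frame to the driver: timestamp it, mirror it to the monitor
 * socket (and to raw sockets in promiscuous mode), then call
 * hdev->send().
 */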
3581 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3582 {
3583 	int err;
3584 
3585 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3586 	       skb->len);
3587 
3588 	/* Time stamp */
3589 	__net_timestamp(skb);
3590 
3591 	/* Send copy to monitor */
3592 	hci_send_to_monitor(hdev, skb);
3593 
3594 	if (atomic_read(&hdev->promisc)) {
3595 		/* Send copy to the sockets */
3596 		hci_send_to_sock(hdev, skb);
3597 	}
3598 
3599 	/* Get rid of skb owner prior to sending to the driver. */
3600 	skb_orphan(skb);
3601 
3602 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3603 		kfree_skb(skb);
3604 		return;
3605 	}
3606 
3607 	err = hdev->send(hdev, skb);
3608 	if (err < 0) {
3609 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3610 		kfree_skb(skb);
3611 	}
3612 }
3613 
3614 /* Send HCI command */
3615 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3616 		 const void *param)
3617 {
3618 	struct sk_buff *skb;
3619 
3620 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3621 
3622 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3623 	if (!skb) {
3624 		bt_dev_err(hdev, "no memory for command");
3625 		return -ENOMEM;
3626 	}
3627 
3628 	/* Stand-alone HCI commands must be flagged as
3629 	 * single-command requests.
3630 	 */
3631 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3632 
3633 	skb_queue_tail(&hdev->cmd_q, skb);
3634 	queue_work(hdev->workqueue, &hdev->cmd_work);
3635 
3636 	return 0;
3637 }
3638 
3639 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3640 		   const void *param)
3641 {
3642 	struct sk_buff *skb;
3643 
3644 	if (hci_opcode_ogf(opcode) != 0x3f) {
3645 		/* A controller receiving a command shall respond with either
3646 		 * a Command Status Event or a Command Complete Event.
3647 		 * Therefore, all standard HCI commands must be sent via the
3648 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3649 		 * Some vendors do not comply with this rule for vendor-specific
3650 		 * commands and do not return any event. We want to support
3651 		 * unresponded commands for such cases only.
3652 		 */
3653 		bt_dev_err(hdev, "unresponded command not supported");
3654 		return -EINVAL;
3655 	}
3656 
3657 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3658 	if (!skb) {
3659 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3660 			   opcode);
3661 		return -ENOMEM;
3662 	}
3663 
3664 	hci_send_frame(hdev, skb);
3665 
3666 	return 0;
3667 }
3668 EXPORT_SYMBOL(__hci_cmd_send);
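
/* Illustrative sketch (not part of the original file): only opcodes in
 * the vendor-specific OGF (0x3f) are accepted here. The OCF below is
 * hypothetical; real values are controller specific.
 *
 *	u8 param = 0x01;
 *
 *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001), sizeof(param),
 *		       &param);
 */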

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
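
/* Illustrative sketch (not part of the original file): a synchronous
 * round trip; the returned skb carries the command complete parameters
 * and must be freed by the caller.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */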

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
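
/* For reference (added comment): hci_handle_pack() folds the packet
 * boundary and broadcast flags into the upper bits of the 16-bit
 * handle field, i.e. (handle & 0x0fff) | (flags << 12), matching the
 * ACL data packet layout in the Bluetooth Core specification.
 */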

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because on 6LoWPAN links this function is called from
		 * softirq context, and using a normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else {
		*quote = 0;
	}

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
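
/* Worked example (added for clarity): with hdev->acl_cnt == 10 free
 * controller buffers and num == 4 ACL connections holding queued data,
 * the least-serviced connection is granted quote = 10 / 4 = 2 packets
 * this round; a zero quotient is rounded up to 1 so no connection is
 * ever starved completely.
 */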

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
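
/* Worked example (added for clarity): with a hypothetical block_len of
 * 339 bytes, a 1024-byte skb (1020 bytes of payload after the 4-byte
 * ACL header) occupies DIV_ROUND_UP(1020, 339) = 4 blocks.
 */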

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
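
/* Illustrative sketch (assumption, mirroring how the event handlers in
 * hci_event.c consume this function): the caller retrieves whichever
 * completion callback applies and invokes it itself.
 *
 *	hci_req_complete_t req_complete = NULL;
 *	hci_req_complete_skb_t req_complete_skb = NULL;
 *
 *	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
 *			     &req_complete_skb);
 *	if (req_complete)
 *		req_complete(hdev, status, opcode);
 */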

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to
		 * process the data packets so the driver can complete
		 * its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}