1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34 
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39 
40 #include "smp.h"
41 
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
45 
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
49 
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
53 
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
56 
57 /* ----- HCI requests ----- */
58 
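/* States of a pending synchronous request, tracked in hdev->req_status
 * while __hci_req_sync() or __hci_cmd_sync_ev() waits for completion.
 */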
59 #define HCI_REQ_DONE	  0
60 #define HCI_REQ_PEND	  1
61 #define HCI_REQ_CANCELED  2
62 
63 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
65 
66 /* ---- HCI notifications ---- */
67 
68 static void hci_notify(struct hci_dev *hdev, int event)
69 {
70 	hci_sock_dev_event(hdev, event);
71 }
72 
73 /* ---- HCI debugfs entries ---- */
74 
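/* "dut_mode" debugfs attribute: reading reports 'Y' or 'N' for the
 * HCI_DUT_MODE debug flag; writing a boolean sends either
 * HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET synchronously and toggles
 * the flag on success.
 */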
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 			     size_t count, loff_t *ppos)
77 {
78 	struct hci_dev *hdev = file->private_data;
79 	char buf[3];
80 
81 	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82 	buf[1] = '\n';
83 	buf[2] = '\0';
84 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85 }
86 
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 			      size_t count, loff_t *ppos)
89 {
90 	struct hci_dev *hdev = file->private_data;
91 	struct sk_buff *skb;
92 	char buf[32];
93 	size_t buf_size = min(count, (sizeof(buf)-1));
94 	bool enable;
95 	int err;
96 
97 	if (!test_bit(HCI_UP, &hdev->flags))
98 		return -ENETDOWN;
99 
100 	if (copy_from_user(buf, user_buf, buf_size))
101 		return -EFAULT;
102 
103 	buf[buf_size] = '\0';
104 	if (strtobool(buf, &enable))
105 		return -EINVAL;
106 
107 	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108 		return -EALREADY;
109 
110 	hci_req_lock(hdev);
111 	if (enable)
112 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 				     HCI_CMD_TIMEOUT);
114 	else
115 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 				     HCI_CMD_TIMEOUT);
117 	hci_req_unlock(hdev);
118 
119 	if (IS_ERR(skb))
120 		return PTR_ERR(skb);
121 
122 	err = -bt_to_errno(skb->data[0]);
123 	kfree_skb(skb);
124 
125 	if (err < 0)
126 		return err;
127 
128 	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
129 
130 	return count;
131 }
132 
133 static const struct file_operations dut_mode_fops = {
134 	.open		= simple_open,
135 	.read		= dut_mode_read,
136 	.write		= dut_mode_write,
137 	.llseek		= default_llseek,
138 };
139 
140 static int features_show(struct seq_file *f, void *ptr)
141 {
142 	struct hci_dev *hdev = f->private;
143 	u8 p;
144 
145 	hci_dev_lock(hdev);
146 	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 			   hdev->features[p][0], hdev->features[p][1],
150 			   hdev->features[p][2], hdev->features[p][3],
151 			   hdev->features[p][4], hdev->features[p][5],
152 			   hdev->features[p][6], hdev->features[p][7]);
153 	}
154 	if (lmp_le_capable(hdev))
155 		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 			   hdev->le_features[0], hdev->le_features[1],
158 			   hdev->le_features[2], hdev->le_features[3],
159 			   hdev->le_features[4], hdev->le_features[5],
160 			   hdev->le_features[6], hdev->le_features[7]);
161 	hci_dev_unlock(hdev);
162 
163 	return 0;
164 }
165 
166 static int features_open(struct inode *inode, struct file *file)
167 {
168 	return single_open(file, features_show, inode->i_private);
169 }
170 
171 static const struct file_operations features_fops = {
172 	.open		= features_open,
173 	.read		= seq_read,
174 	.llseek		= seq_lseek,
175 	.release	= single_release,
176 };
177 
178 static int blacklist_show(struct seq_file *f, void *p)
179 {
180 	struct hci_dev *hdev = f->private;
181 	struct bdaddr_list *b;
182 
183 	hci_dev_lock(hdev);
184 	list_for_each_entry(b, &hdev->blacklist, list)
185 		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 	hci_dev_unlock(hdev);
187 
188 	return 0;
189 }
190 
191 static int blacklist_open(struct inode *inode, struct file *file)
192 {
193 	return single_open(file, blacklist_show, inode->i_private);
194 }
195 
196 static const struct file_operations blacklist_fops = {
197 	.open		= blacklist_open,
198 	.read		= seq_read,
199 	.llseek		= seq_lseek,
200 	.release	= single_release,
201 };
202 
203 static int whitelist_show(struct seq_file *f, void *p)
204 {
205 	struct hci_dev *hdev = f->private;
206 	struct bdaddr_list *b;
207 
208 	hci_dev_lock(hdev);
209 	list_for_each_entry(b, &hdev->whitelist, list)
210 		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 	hci_dev_unlock(hdev);
212 
213 	return 0;
214 }
215 
216 static int whitelist_open(struct inode *inode, struct file *file)
217 {
218 	return single_open(file, whitelist_show, inode->i_private);
219 }
220 
221 static const struct file_operations whitelist_fops = {
222 	.open		= whitelist_open,
223 	.read		= seq_read,
224 	.llseek		= seq_lseek,
225 	.release	= single_release,
226 };
227 
228 static int uuids_show(struct seq_file *f, void *p)
229 {
230 	struct hci_dev *hdev = f->private;
231 	struct bt_uuid *uuid;
232 
233 	hci_dev_lock(hdev);
234 	list_for_each_entry(uuid, &hdev->uuids, list) {
235 		u8 i, val[16];
236 
237 		/* The Bluetooth UUID values are stored in big endian,
238 		 * but with reversed byte order. So convert them into
239 		 * the right order for the %pUb modifier.
240 		 */
241 		for (i = 0; i < 16; i++)
242 			val[i] = uuid->uuid[15 - i];
243 
244 		seq_printf(f, "%pUb\n", val);
245 	}
246 	hci_dev_unlock(hdev);
247 
248 	return 0;
249 }
250 
251 static int uuids_open(struct inode *inode, struct file *file)
252 {
253 	return single_open(file, uuids_show, inode->i_private);
254 }
255 
256 static const struct file_operations uuids_fops = {
257 	.open		= uuids_open,
258 	.read		= seq_read,
259 	.llseek		= seq_lseek,
260 	.release	= single_release,
261 };
262 
263 static int inquiry_cache_show(struct seq_file *f, void *p)
264 {
265 	struct hci_dev *hdev = f->private;
266 	struct discovery_state *cache = &hdev->discovery;
267 	struct inquiry_entry *e;
268 
269 	hci_dev_lock(hdev);
270 
271 	list_for_each_entry(e, &cache->all, all) {
272 		struct inquiry_data *data = &e->data;
273 		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274 			   &data->bdaddr,
275 			   data->pscan_rep_mode, data->pscan_period_mode,
276 			   data->pscan_mode, data->dev_class[2],
277 			   data->dev_class[1], data->dev_class[0],
278 			   __le16_to_cpu(data->clock_offset),
279 			   data->rssi, data->ssp_mode, e->timestamp);
280 	}
281 
282 	hci_dev_unlock(hdev);
283 
284 	return 0;
285 }
286 
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
288 {
289 	return single_open(file, inquiry_cache_show, inode->i_private);
290 }
291 
292 static const struct file_operations inquiry_cache_fops = {
293 	.open		= inquiry_cache_open,
294 	.read		= seq_read,
295 	.llseek		= seq_lseek,
296 	.release	= single_release,
297 };
298 
299 static int link_keys_show(struct seq_file *f, void *ptr)
300 {
301 	struct hci_dev *hdev = f->private;
302 	struct list_head *p, *n;
303 
304 	hci_dev_lock(hdev);
305 	list_for_each_safe(p, n, &hdev->link_keys) {
306 		struct link_key *key = list_entry(p, struct link_key, list);
307 		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309 	}
310 	hci_dev_unlock(hdev);
311 
312 	return 0;
313 }
314 
315 static int link_keys_open(struct inode *inode, struct file *file)
316 {
317 	return single_open(file, link_keys_show, inode->i_private);
318 }
319 
320 static const struct file_operations link_keys_fops = {
321 	.open		= link_keys_open,
322 	.read		= seq_read,
323 	.llseek		= seq_lseek,
324 	.release	= single_release,
325 };
326 
327 static int dev_class_show(struct seq_file *f, void *ptr)
328 {
329 	struct hci_dev *hdev = f->private;
330 
331 	hci_dev_lock(hdev);
332 	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 		   hdev->dev_class[1], hdev->dev_class[0]);
334 	hci_dev_unlock(hdev);
335 
336 	return 0;
337 }
338 
339 static int dev_class_open(struct inode *inode, struct file *file)
340 {
341 	return single_open(file, dev_class_show, inode->i_private);
342 }
343 
344 static const struct file_operations dev_class_fops = {
345 	.open		= dev_class_open,
346 	.read		= seq_read,
347 	.llseek		= seq_lseek,
348 	.release	= single_release,
349 };
350 
351 static int voice_setting_get(void *data, u64 *val)
352 {
353 	struct hci_dev *hdev = data;
354 
355 	hci_dev_lock(hdev);
356 	*val = hdev->voice_setting;
357 	hci_dev_unlock(hdev);
358 
359 	return 0;
360 }
361 
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 			NULL, "0x%4.4llx\n");
364 
365 static int auto_accept_delay_set(void *data, u64 val)
366 {
367 	struct hci_dev *hdev = data;
368 
369 	hci_dev_lock(hdev);
370 	hdev->auto_accept_delay = val;
371 	hci_dev_unlock(hdev);
372 
373 	return 0;
374 }
375 
376 static int auto_accept_delay_get(void *data, u64 *val)
377 {
378 	struct hci_dev *hdev = data;
379 
380 	hci_dev_lock(hdev);
381 	*val = hdev->auto_accept_delay;
382 	hci_dev_unlock(hdev);
383 
384 	return 0;
385 }
386 
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 			auto_accept_delay_set, "%llu\n");
389 
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 				     size_t count, loff_t *ppos)
392 {
393 	struct hci_dev *hdev = file->private_data;
394 	char buf[3];
395 
396 	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
397 	buf[1] = '\n';
398 	buf[2] = '\0';
399 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400 }
401 
402 static ssize_t force_sc_support_write(struct file *file,
403 				      const char __user *user_buf,
404 				      size_t count, loff_t *ppos)
405 {
406 	struct hci_dev *hdev = file->private_data;
407 	char buf[32];
408 	size_t buf_size = min(count, (sizeof(buf)-1));
409 	bool enable;
410 
411 	if (test_bit(HCI_UP, &hdev->flags))
412 		return -EBUSY;
413 
414 	if (copy_from_user(buf, user_buf, buf_size))
415 		return -EFAULT;
416 
417 	buf[buf_size] = '\0';
418 	if (strtobool(buf, &enable))
419 		return -EINVAL;
420 
421 	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
422 		return -EALREADY;
423 
424 	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
425 
426 	return count;
427 }
428 
429 static const struct file_operations force_sc_support_fops = {
430 	.open		= simple_open,
431 	.read		= force_sc_support_read,
432 	.write		= force_sc_support_write,
433 	.llseek		= default_llseek,
434 };
435 
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 				 size_t count, loff_t *ppos)
438 {
439 	struct hci_dev *hdev = file->private_data;
440 	char buf[3];
441 
442 	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443 	buf[1] = '\n';
444 	buf[2] = '\0';
445 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446 }
447 
448 static const struct file_operations sc_only_mode_fops = {
449 	.open		= simple_open,
450 	.read		= sc_only_mode_read,
451 	.llseek		= default_llseek,
452 };
453 
454 static int idle_timeout_set(void *data, u64 val)
455 {
456 	struct hci_dev *hdev = data;
457 
458 	if (val != 0 && (val < 500 || val > 3600000))
459 		return -EINVAL;
460 
461 	hci_dev_lock(hdev);
462 	hdev->idle_timeout = val;
463 	hci_dev_unlock(hdev);
464 
465 	return 0;
466 }
467 
468 static int idle_timeout_get(void *data, u64 *val)
469 {
470 	struct hci_dev *hdev = data;
471 
472 	hci_dev_lock(hdev);
473 	*val = hdev->idle_timeout;
474 	hci_dev_unlock(hdev);
475 
476 	return 0;
477 }
478 
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 			idle_timeout_set, "%llu\n");
481 
482 static int rpa_timeout_set(void *data, u64 val)
483 {
484 	struct hci_dev *hdev = data;
485 
486 	/* Require the RPA timeout to be at least 30 seconds and at most
487 	 * 24 hours.
488 	 */
489 	if (val < 30 || val > (60 * 60 * 24))
490 		return -EINVAL;
491 
492 	hci_dev_lock(hdev);
493 	hdev->rpa_timeout = val;
494 	hci_dev_unlock(hdev);
495 
496 	return 0;
497 }
498 
499 static int rpa_timeout_get(void *data, u64 *val)
500 {
501 	struct hci_dev *hdev = data;
502 
503 	hci_dev_lock(hdev);
504 	*val = hdev->rpa_timeout;
505 	hci_dev_unlock(hdev);
506 
507 	return 0;
508 }
509 
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 			rpa_timeout_set, "%llu\n");
512 
513 static int sniff_min_interval_set(void *data, u64 val)
514 {
515 	struct hci_dev *hdev = data;
516 
517 	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518 		return -EINVAL;
519 
520 	hci_dev_lock(hdev);
521 	hdev->sniff_min_interval = val;
522 	hci_dev_unlock(hdev);
523 
524 	return 0;
525 }
526 
527 static int sniff_min_interval_get(void *data, u64 *val)
528 {
529 	struct hci_dev *hdev = data;
530 
531 	hci_dev_lock(hdev);
532 	*val = hdev->sniff_min_interval;
533 	hci_dev_unlock(hdev);
534 
535 	return 0;
536 }
537 
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 			sniff_min_interval_set, "%llu\n");
540 
541 static int sniff_max_interval_set(void *data, u64 val)
542 {
543 	struct hci_dev *hdev = data;
544 
545 	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546 		return -EINVAL;
547 
548 	hci_dev_lock(hdev);
549 	hdev->sniff_max_interval = val;
550 	hci_dev_unlock(hdev);
551 
552 	return 0;
553 }
554 
555 static int sniff_max_interval_get(void *data, u64 *val)
556 {
557 	struct hci_dev *hdev = data;
558 
559 	hci_dev_lock(hdev);
560 	*val = hdev->sniff_max_interval;
561 	hci_dev_unlock(hdev);
562 
563 	return 0;
564 }
565 
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 			sniff_max_interval_set, "%llu\n");
568 
569 static int conn_info_min_age_set(void *data, u64 val)
570 {
571 	struct hci_dev *hdev = data;
572 
573 	if (val == 0 || val > hdev->conn_info_max_age)
574 		return -EINVAL;
575 
576 	hci_dev_lock(hdev);
577 	hdev->conn_info_min_age = val;
578 	hci_dev_unlock(hdev);
579 
580 	return 0;
581 }
582 
583 static int conn_info_min_age_get(void *data, u64 *val)
584 {
585 	struct hci_dev *hdev = data;
586 
587 	hci_dev_lock(hdev);
588 	*val = hdev->conn_info_min_age;
589 	hci_dev_unlock(hdev);
590 
591 	return 0;
592 }
593 
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 			conn_info_min_age_set, "%llu\n");
596 
597 static int conn_info_max_age_set(void *data, u64 val)
598 {
599 	struct hci_dev *hdev = data;
600 
601 	if (val == 0 || val < hdev->conn_info_min_age)
602 		return -EINVAL;
603 
604 	hci_dev_lock(hdev);
605 	hdev->conn_info_max_age = val;
606 	hci_dev_unlock(hdev);
607 
608 	return 0;
609 }
610 
611 static int conn_info_max_age_get(void *data, u64 *val)
612 {
613 	struct hci_dev *hdev = data;
614 
615 	hci_dev_lock(hdev);
616 	*val = hdev->conn_info_max_age;
617 	hci_dev_unlock(hdev);
618 
619 	return 0;
620 }
621 
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 			conn_info_max_age_set, "%llu\n");
624 
625 static int identity_show(struct seq_file *f, void *p)
626 {
627 	struct hci_dev *hdev = f->private;
628 	bdaddr_t addr;
629 	u8 addr_type;
630 
631 	hci_dev_lock(hdev);
632 
633 	hci_copy_identity_address(hdev, &addr, &addr_type);
634 
635 	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 		   16, hdev->irk, &hdev->rpa);
637 
638 	hci_dev_unlock(hdev);
639 
640 	return 0;
641 }
642 
643 static int identity_open(struct inode *inode, struct file *file)
644 {
645 	return single_open(file, identity_show, inode->i_private);
646 }
647 
648 static const struct file_operations identity_fops = {
649 	.open		= identity_open,
650 	.read		= seq_read,
651 	.llseek		= seq_lseek,
652 	.release	= single_release,
653 };
654 
655 static int random_address_show(struct seq_file *f, void *p)
656 {
657 	struct hci_dev *hdev = f->private;
658 
659 	hci_dev_lock(hdev);
660 	seq_printf(f, "%pMR\n", &hdev->random_addr);
661 	hci_dev_unlock(hdev);
662 
663 	return 0;
664 }
665 
666 static int random_address_open(struct inode *inode, struct file *file)
667 {
668 	return single_open(file, random_address_show, inode->i_private);
669 }
670 
671 static const struct file_operations random_address_fops = {
672 	.open		= random_address_open,
673 	.read		= seq_read,
674 	.llseek		= seq_lseek,
675 	.release	= single_release,
676 };
677 
678 static int static_address_show(struct seq_file *f, void *p)
679 {
680 	struct hci_dev *hdev = f->private;
681 
682 	hci_dev_lock(hdev);
683 	seq_printf(f, "%pMR\n", &hdev->static_addr);
684 	hci_dev_unlock(hdev);
685 
686 	return 0;
687 }
688 
689 static int static_address_open(struct inode *inode, struct file *file)
690 {
691 	return single_open(file, static_address_show, inode->i_private);
692 }
693 
694 static const struct file_operations static_address_fops = {
695 	.open		= static_address_open,
696 	.read		= seq_read,
697 	.llseek		= seq_lseek,
698 	.release	= single_release,
699 };
700 
701 static ssize_t force_static_address_read(struct file *file,
702 					 char __user *user_buf,
703 					 size_t count, loff_t *ppos)
704 {
705 	struct hci_dev *hdev = file->private_data;
706 	char buf[3];
707 
708 	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
709 	buf[1] = '\n';
710 	buf[2] = '\0';
711 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712 }
713 
714 static ssize_t force_static_address_write(struct file *file,
715 					  const char __user *user_buf,
716 					  size_t count, loff_t *ppos)
717 {
718 	struct hci_dev *hdev = file->private_data;
719 	char buf[32];
720 	size_t buf_size = min(count, (sizeof(buf)-1));
721 	bool enable;
722 
723 	if (test_bit(HCI_UP, &hdev->flags))
724 		return -EBUSY;
725 
726 	if (copy_from_user(buf, user_buf, buf_size))
727 		return -EFAULT;
728 
729 	buf[buf_size] = '\0';
730 	if (strtobool(buf, &enable))
731 		return -EINVAL;
732 
733 	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
734 		return -EALREADY;
735 
736 	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
737 
738 	return count;
739 }
740 
741 static const struct file_operations force_static_address_fops = {
742 	.open		= simple_open,
743 	.read		= force_static_address_read,
744 	.write		= force_static_address_write,
745 	.llseek		= default_llseek,
746 };
747 
748 static int white_list_show(struct seq_file *f, void *ptr)
749 {
750 	struct hci_dev *hdev = f->private;
751 	struct bdaddr_list *b;
752 
753 	hci_dev_lock(hdev);
754 	list_for_each_entry(b, &hdev->le_white_list, list)
755 		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 	hci_dev_unlock(hdev);
757 
758 	return 0;
759 }
760 
761 static int white_list_open(struct inode *inode, struct file *file)
762 {
763 	return single_open(file, white_list_show, inode->i_private);
764 }
765 
766 static const struct file_operations white_list_fops = {
767 	.open		= white_list_open,
768 	.read		= seq_read,
769 	.llseek		= seq_lseek,
770 	.release	= single_release,
771 };
772 
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774 {
775 	struct hci_dev *hdev = f->private;
776 	struct list_head *p, *n;
777 
778 	hci_dev_lock(hdev);
779 	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 			   &irk->bdaddr, irk->addr_type,
783 			   16, irk->val, &irk->rpa);
784 	}
785 	hci_dev_unlock(hdev);
786 
787 	return 0;
788 }
789 
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791 {
792 	return single_open(file, identity_resolving_keys_show,
793 			   inode->i_private);
794 }
795 
796 static const struct file_operations identity_resolving_keys_fops = {
797 	.open		= identity_resolving_keys_open,
798 	.read		= seq_read,
799 	.llseek		= seq_lseek,
800 	.release	= single_release,
801 };
802 
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
804 {
805 	struct hci_dev *hdev = f->private;
806 	struct list_head *p, *n;
807 
808 	hci_dev_lock(hdev);
809 	list_for_each_safe(p, n, &hdev->long_term_keys) {
810 		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 			   __le64_to_cpu(ltk->rand), 16, ltk->val);
815 	}
816 	hci_dev_unlock(hdev);
817 
818 	return 0;
819 }
820 
821 static int long_term_keys_open(struct inode *inode, struct file *file)
822 {
823 	return single_open(file, long_term_keys_show, inode->i_private);
824 }
825 
826 static const struct file_operations long_term_keys_fops = {
827 	.open		= long_term_keys_open,
828 	.read		= seq_read,
829 	.llseek		= seq_lseek,
830 	.release	= single_release,
831 };
832 
833 static int conn_min_interval_set(void *data, u64 val)
834 {
835 	struct hci_dev *hdev = data;
836 
837 	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838 		return -EINVAL;
839 
840 	hci_dev_lock(hdev);
841 	hdev->le_conn_min_interval = val;
842 	hci_dev_unlock(hdev);
843 
844 	return 0;
845 }
846 
847 static int conn_min_interval_get(void *data, u64 *val)
848 {
849 	struct hci_dev *hdev = data;
850 
851 	hci_dev_lock(hdev);
852 	*val = hdev->le_conn_min_interval;
853 	hci_dev_unlock(hdev);
854 
855 	return 0;
856 }
857 
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 			conn_min_interval_set, "%llu\n");
860 
861 static int conn_max_interval_set(void *data, u64 val)
862 {
863 	struct hci_dev *hdev = data;
864 
865 	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866 		return -EINVAL;
867 
868 	hci_dev_lock(hdev);
869 	hdev->le_conn_max_interval = val;
870 	hci_dev_unlock(hdev);
871 
872 	return 0;
873 }
874 
875 static int conn_max_interval_get(void *data, u64 *val)
876 {
877 	struct hci_dev *hdev = data;
878 
879 	hci_dev_lock(hdev);
880 	*val = hdev->le_conn_max_interval;
881 	hci_dev_unlock(hdev);
882 
883 	return 0;
884 }
885 
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 			conn_max_interval_set, "%llu\n");
888 
889 static int conn_latency_set(void *data, u64 val)
890 {
891 	struct hci_dev *hdev = data;
892 
893 	if (val > 0x01f3)
894 		return -EINVAL;
895 
896 	hci_dev_lock(hdev);
897 	hdev->le_conn_latency = val;
898 	hci_dev_unlock(hdev);
899 
900 	return 0;
901 }
902 
903 static int conn_latency_get(void *data, u64 *val)
904 {
905 	struct hci_dev *hdev = data;
906 
907 	hci_dev_lock(hdev);
908 	*val = hdev->le_conn_latency;
909 	hci_dev_unlock(hdev);
910 
911 	return 0;
912 }
913 
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 			conn_latency_set, "%llu\n");
916 
917 static int supervision_timeout_set(void *data, u64 val)
918 {
919 	struct hci_dev *hdev = data;
920 
921 	if (val < 0x000a || val > 0x0c80)
922 		return -EINVAL;
923 
924 	hci_dev_lock(hdev);
925 	hdev->le_supv_timeout = val;
926 	hci_dev_unlock(hdev);
927 
928 	return 0;
929 }
930 
931 static int supervision_timeout_get(void *data, u64 *val)
932 {
933 	struct hci_dev *hdev = data;
934 
935 	hci_dev_lock(hdev);
936 	*val = hdev->le_supv_timeout;
937 	hci_dev_unlock(hdev);
938 
939 	return 0;
940 }
941 
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 			supervision_timeout_set, "%llu\n");
944 
945 static int adv_channel_map_set(void *data, u64 val)
946 {
947 	struct hci_dev *hdev = data;
948 
949 	if (val < 0x01 || val > 0x07)
950 		return -EINVAL;
951 
952 	hci_dev_lock(hdev);
953 	hdev->le_adv_channel_map = val;
954 	hci_dev_unlock(hdev);
955 
956 	return 0;
957 }
958 
959 static int adv_channel_map_get(void *data, u64 *val)
960 {
961 	struct hci_dev *hdev = data;
962 
963 	hci_dev_lock(hdev);
964 	*val = hdev->le_adv_channel_map;
965 	hci_dev_unlock(hdev);
966 
967 	return 0;
968 }
969 
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 			adv_channel_map_set, "%llu\n");
972 
973 static int adv_min_interval_set(void *data, u64 val)
974 {
975 	struct hci_dev *hdev = data;
976 
977 	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
978 		return -EINVAL;
979 
980 	hci_dev_lock(hdev);
981 	hdev->le_adv_min_interval = val;
982 	hci_dev_unlock(hdev);
983 
984 	return 0;
985 }
986 
987 static int adv_min_interval_get(void *data, u64 *val)
988 {
989 	struct hci_dev *hdev = data;
990 
991 	hci_dev_lock(hdev);
992 	*val = hdev->le_adv_min_interval;
993 	hci_dev_unlock(hdev);
994 
995 	return 0;
996 }
997 
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999 			adv_min_interval_set, "%llu\n");
1000 
1001 static int adv_max_interval_set(void *data, u64 val)
1002 {
1003 	struct hci_dev *hdev = data;
1004 
1005 	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1006 		return -EINVAL;
1007 
1008 	hci_dev_lock(hdev);
1009 	hdev->le_adv_max_interval = val;
1010 	hci_dev_unlock(hdev);
1011 
1012 	return 0;
1013 }
1014 
1015 static int adv_max_interval_get(void *data, u64 *val)
1016 {
1017 	struct hci_dev *hdev = data;
1018 
1019 	hci_dev_lock(hdev);
1020 	*val = hdev->le_adv_max_interval;
1021 	hci_dev_unlock(hdev);
1022 
1023 	return 0;
1024 }
1025 
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027 			adv_max_interval_set, "%llu\n");
1028 
1029 static int device_list_show(struct seq_file *f, void *ptr)
1030 {
1031 	struct hci_dev *hdev = f->private;
1032 	struct hci_conn_params *p;
1033 
1034 	hci_dev_lock(hdev);
1035 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1036 		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1037 			   p->auto_connect);
1038 	}
1039 	hci_dev_unlock(hdev);
1040 
1041 	return 0;
1042 }
1043 
1044 static int device_list_open(struct inode *inode, struct file *file)
1045 {
1046 	return single_open(file, device_list_show, inode->i_private);
1047 }
1048 
1049 static const struct file_operations device_list_fops = {
1050 	.open		= device_list_open,
1051 	.read		= seq_read,
1052 	.llseek		= seq_lseek,
1053 	.release	= single_release,
1054 };
1055 
1056 /* ---- HCI requests ---- */
1057 
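/* Completion callback for synchronous requests: stash the status byte in
 * req_result and wake the thread sleeping on req_wait_q.
 */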
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1059 {
1060 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
1061 
1062 	if (hdev->req_status == HCI_REQ_PEND) {
1063 		hdev->req_result = result;
1064 		hdev->req_status = HCI_REQ_DONE;
1065 		wake_up_interruptible(&hdev->req_wait_q);
1066 	}
1067 }
1068 
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1070 {
1071 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
1072 
1073 	if (hdev->req_status == HCI_REQ_PEND) {
1074 		hdev->req_result = err;
1075 		hdev->req_status = HCI_REQ_CANCELED;
1076 		wake_up_interruptible(&hdev->req_wait_q);
1077 	}
1078 }
1079 
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1081 					    u8 event)
1082 {
1083 	struct hci_ev_cmd_complete *ev;
1084 	struct hci_event_hdr *hdr;
1085 	struct sk_buff *skb;
1086 
1087 	hci_dev_lock(hdev);
1088 
1089 	skb = hdev->recv_evt;
1090 	hdev->recv_evt = NULL;
1091 
1092 	hci_dev_unlock(hdev);
1093 
1094 	if (!skb)
1095 		return ERR_PTR(-ENODATA);
1096 
1097 	if (skb->len < sizeof(*hdr)) {
1098 		BT_ERR("Too short HCI event");
1099 		goto failed;
1100 	}
1101 
1102 	hdr = (void *) skb->data;
1103 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
1104 
1105 	if (event) {
1106 		if (hdr->evt != event)
1107 			goto failed;
1108 		return skb;
1109 	}
1110 
1111 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112 		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1113 		goto failed;
1114 	}
1115 
1116 	if (skb->len < sizeof(*ev)) {
1117 		BT_ERR("Too short cmd_complete event");
1118 		goto failed;
1119 	}
1120 
1121 	ev = (void *) skb->data;
1122 	skb_pull(skb, sizeof(*ev));
1123 
1124 	if (opcode == __le16_to_cpu(ev->opcode))
1125 		return skb;
1126 
1127 	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1128 	       __le16_to_cpu(ev->opcode));
1129 
1130 failed:
1131 	kfree_skb(skb);
1132 	return ERR_PTR(-ENODATA);
1133 }
1134 
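/* Send a single HCI command and wait for the controller's answer. The
 * command is queued as a one-entry request, the caller sleeps on
 * req_wait_q until hci_req_sync_complete() runs or the timeout expires,
 * and the resulting event skb (Command Complete, or the event passed in
 * 'event') is returned. Failures are reported as ERR_PTR values.
 */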
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136 				  const void *param, u8 event, u32 timeout)
1137 {
1138 	DECLARE_WAITQUEUE(wait, current);
1139 	struct hci_request req;
1140 	int err = 0;
1141 
1142 	BT_DBG("%s", hdev->name);
1143 
1144 	hci_req_init(&req, hdev);
1145 
1146 	hci_req_add_ev(&req, opcode, plen, param, event);
1147 
1148 	hdev->req_status = HCI_REQ_PEND;
1149 
1150 	err = hci_req_run(&req, hci_req_sync_complete);
1151 	if (err < 0)
1152 		return ERR_PTR(err);
1153 
1154 	add_wait_queue(&hdev->req_wait_q, &wait);
1155 	set_current_state(TASK_INTERRUPTIBLE);
1156 
1157 	schedule_timeout(timeout);
1158 
1159 	remove_wait_queue(&hdev->req_wait_q, &wait);
1160 
1161 	if (signal_pending(current))
1162 		return ERR_PTR(-EINTR);
1163 
1164 	switch (hdev->req_status) {
1165 	case HCI_REQ_DONE:
1166 		err = -bt_to_errno(hdev->req_result);
1167 		break;
1168 
1169 	case HCI_REQ_CANCELED:
1170 		err = -hdev->req_result;
1171 		break;
1172 
1173 	default:
1174 		err = -ETIMEDOUT;
1175 		break;
1176 	}
1177 
1178 	hdev->req_status = hdev->req_result = 0;
1179 
1180 	BT_DBG("%s end: err %d", hdev->name, err);
1181 
1182 	if (err < 0)
1183 		return ERR_PTR(err);
1184 
1185 	return hci_get_cmd_complete(hdev, opcode, event);
1186 }
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1188 
1189 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1190 			       const void *param, u32 timeout)
1191 {
1192 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1193 }
1194 EXPORT_SYMBOL(__hci_cmd_sync);
1195 
1196 /* Execute request and wait for completion. */
1197 static int __hci_req_sync(struct hci_dev *hdev,
1198 			  void (*func)(struct hci_request *req,
1199 				      unsigned long opt),
1200 			  unsigned long opt, __u32 timeout)
1201 {
1202 	struct hci_request req;
1203 	DECLARE_WAITQUEUE(wait, current);
1204 	int err = 0;
1205 
1206 	BT_DBG("%s start", hdev->name);
1207 
1208 	hci_req_init(&req, hdev);
1209 
1210 	hdev->req_status = HCI_REQ_PEND;
1211 
1212 	func(&req, opt);
1213 
1214 	err = hci_req_run(&req, hci_req_sync_complete);
1215 	if (err < 0) {
1216 		hdev->req_status = 0;
1217 
1218 		/* ENODATA means the HCI request command queue is empty.
1219 		 * This can happen when a request with conditionals doesn't
1220 		 * trigger any commands to be sent. This is normal behavior
1221 		 * and should not trigger an error return.
1222 		 */
1223 		if (err == -ENODATA)
1224 			return 0;
1225 
1226 		return err;
1227 	}
1228 
1229 	add_wait_queue(&hdev->req_wait_q, &wait);
1230 	set_current_state(TASK_INTERRUPTIBLE);
1231 
1232 	schedule_timeout(timeout);
1233 
1234 	remove_wait_queue(&hdev->req_wait_q, &wait);
1235 
1236 	if (signal_pending(current))
1237 		return -EINTR;
1238 
1239 	switch (hdev->req_status) {
1240 	case HCI_REQ_DONE:
1241 		err = -bt_to_errno(hdev->req_result);
1242 		break;
1243 
1244 	case HCI_REQ_CANCELED:
1245 		err = -hdev->req_result;
1246 		break;
1247 
1248 	default:
1249 		err = -ETIMEDOUT;
1250 		break;
1251 	}
1252 
1253 	hdev->req_status = hdev->req_result = 0;
1254 
1255 	BT_DBG("%s end: err %d", hdev->name, err);
1256 
1257 	return err;
1258 }
1259 
1260 static int hci_req_sync(struct hci_dev *hdev,
1261 			void (*req)(struct hci_request *req,
1262 				    unsigned long opt),
1263 			unsigned long opt, __u32 timeout)
1264 {
1265 	int ret;
1266 
1267 	if (!test_bit(HCI_UP, &hdev->flags))
1268 		return -ENETDOWN;
1269 
1270 	/* Serialize all requests */
1271 	hci_req_lock(hdev);
1272 	ret = __hci_req_sync(hdev, req, opt, timeout);
1273 	hci_req_unlock(hdev);
1274 
1275 	return ret;
1276 }
1277 
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1279 {
1280 	BT_DBG("%s %ld", req->hdev->name, opt);
1281 
1282 	/* Reset device */
1283 	set_bit(HCI_RESET, &req->hdev->flags);
1284 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
1285 }
1286 
1287 static void bredr_init(struct hci_request *req)
1288 {
1289 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1290 
1291 	/* Read Local Supported Features */
1292 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1293 
1294 	/* Read Local Version */
1295 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1296 
1297 	/* Read BD Address */
1298 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1299 }
1300 
1301 static void amp_init(struct hci_request *req)
1302 {
1303 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1304 
1305 	/* Read Local Version */
1306 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1307 
1308 	/* Read Local Supported Commands */
1309 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1310 
1311 	/* Read Local Supported Features */
1312 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1313 
1314 	/* Read Local AMP Info */
1315 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1316 
1317 	/* Read Data Blk size */
1318 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1319 
1320 	/* Read Flow Control Mode */
1321 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1322 
1323 	/* Read Location Data */
1324 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1325 }
1326 
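/* Stage 1 of controller initialization: send HCI Reset (unless the
 * HCI_QUIRK_RESET_ON_CLOSE quirk is set) and run the basic identification
 * reads for the device type, BR/EDR or AMP.
 */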
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1328 {
1329 	struct hci_dev *hdev = req->hdev;
1330 
1331 	BT_DBG("%s %ld", hdev->name, opt);
1332 
1333 	/* Reset */
1334 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335 		hci_reset_req(req, 0);
1336 
1337 	switch (hdev->dev_type) {
1338 	case HCI_BREDR:
1339 		bredr_init(req);
1340 		break;
1341 
1342 	case HCI_AMP:
1343 		amp_init(req);
1344 		break;
1345 
1346 	default:
1347 		BT_ERR("Unknown device type %d", hdev->dev_type);
1348 		break;
1349 	}
1350 }
1351 
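/* Stage 2 BR/EDR setup: read buffer sizes, class of device, local name,
 * voice setting and IAC information, clear the event filters and set the
 * connection accept timeout.
 */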
1352 static void bredr_setup(struct hci_request *req)
1353 {
1354 	struct hci_dev *hdev = req->hdev;
1355 
1356 	__le16 param;
1357 	__u8 flt_type;
1358 
1359 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
1360 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1361 
1362 	/* Read Class of Device */
1363 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1364 
1365 	/* Read Local Name */
1366 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1367 
1368 	/* Read Voice Setting */
1369 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1370 
1371 	/* Read Number of Supported IAC */
1372 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1373 
1374 	/* Read Current IAC LAP */
1375 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1376 
1377 	/* Clear Event Filters */
1378 	flt_type = HCI_FLT_CLEAR_ALL;
1379 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1380 
1381 	/* Connection accept timeout ~20 secs */
1382 	param = cpu_to_le16(0x7d00);
1383 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1384 
1385 	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1386 	 * but it does not support page scan related HCI commands.
1387 	 */
1388 	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1389 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1390 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1391 	}
1392 }
1393 
1394 static void le_setup(struct hci_request *req)
1395 {
1396 	struct hci_dev *hdev = req->hdev;
1397 
1398 	/* Read LE Buffer Size */
1399 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1400 
1401 	/* Read LE Local Supported Features */
1402 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1403 
1404 	/* Read LE Supported States */
1405 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1406 
1407 	/* Read LE White List Size */
1408 	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1409 
1410 	/* Clear LE White List */
1411 	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1412 
1413 	/* LE-only controllers have LE implicitly enabled */
1414 	if (!lmp_bredr_capable(hdev))
1415 		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1416 }
1417 
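/* Choose the inquiry mode to configure: 0x02 for extended inquiry result,
 * 0x01 for inquiry result with RSSI, 0x00 for standard. A few controllers
 * are special-cased by manufacturer and LMP revision.
 */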
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419 {
1420 	if (lmp_ext_inq_capable(hdev))
1421 		return 0x02;
1422 
1423 	if (lmp_inq_rssi_capable(hdev))
1424 		return 0x01;
1425 
1426 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427 	    hdev->lmp_subver == 0x0757)
1428 		return 0x01;
1429 
1430 	if (hdev->manufacturer == 15) {
1431 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432 			return 0x01;
1433 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434 			return 0x01;
1435 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436 			return 0x01;
1437 	}
1438 
1439 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440 	    hdev->lmp_subver == 0x1805)
1441 		return 0x01;
1442 
1443 	return 0x00;
1444 }
1445 
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1447 {
1448 	u8 mode;
1449 
1450 	mode = hci_get_inquiry_mode(req->hdev);
1451 
1452 	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1453 }
1454 
1455 static void hci_setup_event_mask(struct hci_request *req)
1456 {
1457 	struct hci_dev *hdev = req->hdev;
1458 
1459 	/* The second byte is 0xff instead of 0x9f (two reserved bits
1460 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461 	 * command otherwise.
1462 	 */
1463 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1464 
1465 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1466 	 * any event mask for pre 1.2 devices.
1467 	 */
1468 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469 		return;
1470 
1471 	if (lmp_bredr_capable(hdev)) {
1472 		events[4] |= 0x01; /* Flow Specification Complete */
1473 		events[4] |= 0x02; /* Inquiry Result with RSSI */
1474 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475 		events[5] |= 0x08; /* Synchronous Connection Complete */
1476 		events[5] |= 0x10; /* Synchronous Connection Changed */
1477 	} else {
1478 		/* Use a different default for LE-only devices */
1479 		memset(events, 0, sizeof(events));
1480 		events[0] |= 0x10; /* Disconnection Complete */
1481 		events[1] |= 0x08; /* Read Remote Version Information Complete */
1482 		events[1] |= 0x20; /* Command Complete */
1483 		events[1] |= 0x40; /* Command Status */
1484 		events[1] |= 0x80; /* Hardware Error */
1485 		events[2] |= 0x04; /* Number of Completed Packets */
1486 		events[3] |= 0x02; /* Data Buffer Overflow */
1487 
1488 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 			events[0] |= 0x80; /* Encryption Change */
1490 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491 		}
1492 	}
1493 
1494 	if (lmp_inq_rssi_capable(hdev))
1495 		events[4] |= 0x02; /* Inquiry Result with RSSI */
1496 
1497 	if (lmp_sniffsubr_capable(hdev))
1498 		events[5] |= 0x20; /* Sniff Subrating */
1499 
1500 	if (lmp_pause_enc_capable(hdev))
1501 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502 
1503 	if (lmp_ext_inq_capable(hdev))
1504 		events[5] |= 0x40; /* Extended Inquiry Result */
1505 
1506 	if (lmp_no_flush_capable(hdev))
1507 		events[7] |= 0x01; /* Enhanced Flush Complete */
1508 
1509 	if (lmp_lsto_capable(hdev))
1510 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511 
1512 	if (lmp_ssp_capable(hdev)) {
1513 		events[6] |= 0x01;	/* IO Capability Request */
1514 		events[6] |= 0x02;	/* IO Capability Response */
1515 		events[6] |= 0x04;	/* User Confirmation Request */
1516 		events[6] |= 0x08;	/* User Passkey Request */
1517 		events[6] |= 0x10;	/* Remote OOB Data Request */
1518 		events[6] |= 0x20;	/* Simple Pairing Complete */
1519 		events[7] |= 0x04;	/* User Passkey Notification */
1520 		events[7] |= 0x08;	/* Keypress Notification */
1521 		events[7] |= 0x10;	/* Remote Host Supported
1522 					 * Features Notification
1523 					 */
1524 	}
1525 
1526 	if (lmp_le_capable(hdev))
1527 		events[7] |= 0x20;	/* LE Meta-Event */
1528 
1529 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1530 }
1531 
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1533 {
1534 	struct hci_dev *hdev = req->hdev;
1535 
1536 	if (lmp_bredr_capable(hdev))
1537 		bredr_setup(req);
1538 	else
1539 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1540 
1541 	if (lmp_le_capable(hdev))
1542 		le_setup(req);
1543 
1544 	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545 	 * local supported commands HCI command.
1546 	 */
1547 	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1549 
1550 	if (lmp_ssp_capable(hdev)) {
1551 		/* When SSP is available, the host features page
1552 		 * should also be available. However some
1553 		 * controllers list the max_page as 0 as long as SSP
1554 		 * has not been enabled. To achieve proper debugging
1555 		 * output, force the minimum max_page to 1 at least.
1556 		 */
1557 		hdev->max_page = 0x01;
1558 
1559 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560 			u8 mode = 0x01;
1561 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562 				    sizeof(mode), &mode);
1563 		} else {
1564 			struct hci_cp_write_eir cp;
1565 
1566 			memset(hdev->eir, 0, sizeof(hdev->eir));
1567 			memset(&cp, 0, sizeof(cp));
1568 
1569 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1570 		}
1571 	}
1572 
1573 	if (lmp_inq_rssi_capable(hdev))
1574 		hci_setup_inquiry_mode(req);
1575 
1576 	if (lmp_inq_tx_pwr_capable(hdev))
1577 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1578 
1579 	if (lmp_ext_feat_capable(hdev)) {
1580 		struct hci_cp_read_local_ext_features cp;
1581 
1582 		cp.page = 0x01;
1583 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584 			    sizeof(cp), &cp);
1585 	}
1586 
1587 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588 		u8 enable = 1;
1589 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590 			    &enable);
1591 	}
1592 }
1593 
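/* Build the default link policy from the supported LMP features (role
 * switch, hold, sniff, park) and send Write Default Link Policy Settings.
 */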
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596 	struct hci_dev *hdev = req->hdev;
1597 	struct hci_cp_write_def_link_policy cp;
1598 	u16 link_policy = 0;
1599 
1600 	if (lmp_rswitch_capable(hdev))
1601 		link_policy |= HCI_LP_RSWITCH;
1602 	if (lmp_hold_capable(hdev))
1603 		link_policy |= HCI_LP_HOLD;
1604 	if (lmp_sniff_capable(hdev))
1605 		link_policy |= HCI_LP_SNIFF;
1606 	if (lmp_park_capable(hdev))
1607 		link_policy |= HCI_LP_PARK;
1608 
1609 	cp.policy = cpu_to_le16(link_policy);
1610 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612 
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615 	struct hci_dev *hdev = req->hdev;
1616 	struct hci_cp_write_le_host_supported cp;
1617 
1618 	/* LE-only devices do not support explicit enablement */
1619 	if (!lmp_bredr_capable(hdev))
1620 		return;
1621 
1622 	memset(&cp, 0, sizeof(cp));
1623 
1624 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625 		cp.le = 0x01;
1626 		cp.simul = 0x00;
1627 	}
1628 
1629 	if (cp.le != lmp_host_le_capable(hdev))
1630 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631 			    &cp);
1632 }
1633 
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1635 {
1636 	struct hci_dev *hdev = req->hdev;
1637 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638 
1639 	/* If Connectionless Slave Broadcast master role is supported
1640 	 * enable all necessary events for it.
1641 	 */
1642 	if (lmp_csb_master_capable(hdev)) {
1643 		events[1] |= 0x40;	/* Triggered Clock Capture */
1644 		events[1] |= 0x80;	/* Synchronization Train Complete */
1645 		events[2] |= 0x10;	/* Slave Page Response Timeout */
1646 		events[2] |= 0x20;	/* CSB Channel Map Change */
1647 	}
1648 
1649 	/* If Connectionless Slave Broadcast slave role is supported
1650 	 * enable all necessary events for it.
1651 	 */
1652 	if (lmp_csb_slave_capable(hdev)) {
1653 		events[2] |= 0x01;	/* Synchronization Train Received */
1654 		events[2] |= 0x02;	/* CSB Receive */
1655 		events[2] |= 0x04;	/* CSB Timeout */
1656 		events[2] |= 0x08;	/* Truncated Page Complete */
1657 	}
1658 
1659 	/* Enable Authenticated Payload Timeout Expired event if supported */
1660 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1661 		events[2] |= 0x80;
1662 
1663 	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664 }
1665 
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1667 {
1668 	struct hci_dev *hdev = req->hdev;
1669 	u8 p;
1670 
1671 	hci_setup_event_mask(req);
1672 
1673 	/* Some Broadcom based Bluetooth controllers do not support the
1674 	 * Delete Stored Link Key command. They are clearly indicating its
1675 	 * absence in the bit mask of supported commands.
1676 	 *
1677 	 * Check the supported commands and only if the command is marked
1678 	 * as supported, send it. If not supported, assume that the controller
1679 	 * does not have actual support for stored link keys which makes this
1680 	 * command redundant anyway.
1681 	 *
1682 	 * Some controllers indicate that they support handling deleting
1683 	 * stored link keys, but they don't. The quirk lets a driver
1684 	 * just disable this command.
1685 	 */
1686 	if (hdev->commands[6] & 0x80 &&
1687 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688 		struct hci_cp_delete_stored_link_key cp;
1689 
1690 		bacpy(&cp.bdaddr, BDADDR_ANY);
1691 		cp.delete_all = 0x01;
1692 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693 			    sizeof(cp), &cp);
1694 	}
1695 
1696 	if (hdev->commands[5] & 0x10)
1697 		hci_setup_link_policy(req);
1698 
1699 	if (lmp_le_capable(hdev)) {
1700 		u8 events[8];
1701 
1702 		memset(events, 0, sizeof(events));
1703 		events[0] = 0x0f;
1704 
1705 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 			events[0] |= 0x10;	/* LE Long Term Key Request */
1707 
1708 		/* If controller supports the Connection Parameters Request
1709 		 * Link Layer Procedure, enable the corresponding event.
1710 		 */
1711 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 			events[0] |= 0x20;	/* LE Remote Connection
1713 						 * Parameter Request
1714 						 */
1715 
1716 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717 			    events);
1718 
1719 		if (hdev->commands[25] & 0x40) {
1720 			/* Read LE Advertising Channel TX Power */
1721 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722 		}
1723 
1724 		hci_set_le_support(req);
1725 	}
1726 
1727 	/* Read features beyond page 1 if available */
1728 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729 		struct hci_cp_read_local_ext_features cp;
1730 
1731 		cp.page = p;
1732 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733 			    sizeof(cp), &cp);
1734 	}
1735 }
1736 
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738 {
1739 	struct hci_dev *hdev = req->hdev;
1740 
1741 	/* Set event mask page 2 if the HCI command for it is supported */
1742 	if (hdev->commands[22] & 0x04)
1743 		hci_set_event_mask_page_2(req);
1744 
1745 	/* Read local codec list if the HCI command is supported */
1746 	if (hdev->commands[29] & 0x20)
1747 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748 
1749 	/* Get MWS transport configuration if the HCI command is supported */
1750 	if (hdev->commands[30] & 0x08)
1751 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752 
1753 	/* Check for Synchronization Train support */
1754 	if (lmp_sync_train_capable(hdev))
1755 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1756 
1757 	/* Enable Secure Connections if supported and configured */
1758 	if ((lmp_sc_capable(hdev) ||
1759 	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760 	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761 		u8 support = 0x01;
1762 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763 			    sizeof(support), &support);
1764 	}
1765 }
1766 
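/* Staged controller bring-up: stage 1 runs for every controller, stages
 * 2-4 only for HCI_BREDR devices, and the debugfs entries are created
 * once during the initial HCI_SETUP phase.
 */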
1767 static int __hci_init(struct hci_dev *hdev)
1768 {
1769 	int err;
1770 
1771 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772 	if (err < 0)
1773 		return err;
1774 
1775 	/* The Device Under Test (DUT) mode is special and available for
1776 	 * all controller types. So just create it early on.
1777 	 */
1778 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779 		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780 				    &dut_mode_fops);
1781 	}
1782 
1783 	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784 	 * BR/EDR/LE type controllers. AMP controllers only need the
1785 	 * first stage init.
1786 	 */
1787 	if (hdev->dev_type != HCI_BREDR)
1788 		return 0;
1789 
1790 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791 	if (err < 0)
1792 		return err;
1793 
1794 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795 	if (err < 0)
1796 		return err;
1797 
1798 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799 	if (err < 0)
1800 		return err;
1801 
1802 	/* Only create debugfs entries during the initial setup
1803 	 * phase and not every time the controller gets powered on.
1804 	 */
1805 	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806 		return 0;
1807 
1808 	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809 			    &features_fops);
1810 	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811 			   &hdev->manufacturer);
1812 	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813 	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814 	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815 			    &blacklist_fops);
1816 	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817 			    &whitelist_fops);
1818 	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819 
1820 	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821 			    &conn_info_min_age_fops);
1822 	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823 			    &conn_info_max_age_fops);
1824 
1825 	if (lmp_bredr_capable(hdev)) {
1826 		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827 				    hdev, &inquiry_cache_fops);
1828 		debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829 				    hdev, &link_keys_fops);
1830 		debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831 				    hdev, &dev_class_fops);
1832 		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833 				    hdev, &voice_setting_fops);
1834 	}
1835 
1836 	if (lmp_ssp_capable(hdev)) {
1837 		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838 				    hdev, &auto_accept_delay_fops);
1839 		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840 				    hdev, &force_sc_support_fops);
1841 		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842 				    hdev, &sc_only_mode_fops);
1843 	}
1844 
1845 	if (lmp_sniff_capable(hdev)) {
1846 		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847 				    hdev, &idle_timeout_fops);
1848 		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849 				    hdev, &sniff_min_interval_fops);
1850 		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851 				    hdev, &sniff_max_interval_fops);
1852 	}
1853 
1854 	if (lmp_le_capable(hdev)) {
1855 		debugfs_create_file("identity", 0400, hdev->debugfs,
1856 				    hdev, &identity_fops);
1857 		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858 				    hdev, &rpa_timeout_fops);
1859 		debugfs_create_file("random_address", 0444, hdev->debugfs,
1860 				    hdev, &random_address_fops);
1861 		debugfs_create_file("static_address", 0444, hdev->debugfs,
1862 				    hdev, &static_address_fops);
1863 
1864 		/* For controllers with a public address, provide a debug
1865 		 * option to force the usage of the configured static
1866 		 * address. By default the public address is used.
1867 		 */
1868 		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869 			debugfs_create_file("force_static_address", 0644,
1870 					    hdev->debugfs, hdev,
1871 					    &force_static_address_fops);
1872 
1873 		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874 				  &hdev->le_white_list_size);
1875 		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876 				    &white_list_fops);
1877 		debugfs_create_file("identity_resolving_keys", 0400,
1878 				    hdev->debugfs, hdev,
1879 				    &identity_resolving_keys_fops);
1880 		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881 				    hdev, &long_term_keys_fops);
1882 		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883 				    hdev, &conn_min_interval_fops);
1884 		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885 				    hdev, &conn_max_interval_fops);
1886 		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 				    hdev, &conn_latency_fops);
1888 		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 				    hdev, &supervision_timeout_fops);
1890 		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891 				    hdev, &adv_channel_map_fops);
1892 		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893 				    hdev, &adv_min_interval_fops);
1894 		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895 				    hdev, &adv_max_interval_fops);
1896 		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897 				    &device_list_fops);
1898 		debugfs_create_u16("discov_interleaved_timeout", 0644,
1899 				   hdev->debugfs,
1900 				   &hdev->discov_interleaved_timeout);
1901 
1902 		smp_register(hdev);
1903 	}
1904 
1905 	return 0;
1906 }
1907 
1908 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1909 {
1910 	struct hci_dev *hdev = req->hdev;
1911 
1912 	BT_DBG("%s %ld", hdev->name, opt);
1913 
1914 	/* Reset */
1915 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1916 		hci_reset_req(req, 0);
1917 
1918 	/* Read Local Version */
1919 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1920 
1921 	/* Read BD Address */
1922 	if (hdev->set_bdaddr)
1923 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1924 }
1925 
1926 static int __hci_unconf_init(struct hci_dev *hdev)
1927 {
1928 	int err;
1929 
1930 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1931 		return 0;
1932 
1933 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1934 	if (err < 0)
1935 		return err;
1936 
1937 	return 0;
1938 }
1939 
1940 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1941 {
1942 	__u8 scan = opt;
1943 
1944 	BT_DBG("%s %x", req->hdev->name, scan);
1945 
1946 	/* Inquiry and Page scans */
1947 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1948 }
1949 
1950 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1951 {
1952 	__u8 auth = opt;
1953 
1954 	BT_DBG("%s %x", req->hdev->name, auth);
1955 
1956 	/* Authentication */
1957 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1958 }
1959 
1960 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1961 {
1962 	__u8 encrypt = opt;
1963 
1964 	BT_DBG("%s %x", req->hdev->name, encrypt);
1965 
1966 	/* Encryption */
1967 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1968 }
1969 
1970 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1971 {
1972 	__le16 policy = cpu_to_le16(opt);
1973 
1974 	BT_DBG("%s %x", req->hdev->name, policy);
1975 
1976 	/* Default link policy */
1977 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1978 }
1979 
1980 /* Get HCI device by index.
1981  * A reference to the device is held on return; drop it with hci_dev_put(). */
1982 struct hci_dev *hci_dev_get(int index)
1983 {
1984 	struct hci_dev *hdev = NULL, *d;
1985 
1986 	BT_DBG("%d", index);
1987 
1988 	if (index < 0)
1989 		return NULL;
1990 
1991 	read_lock(&hci_dev_list_lock);
1992 	list_for_each_entry(d, &hci_dev_list, list) {
1993 		if (d->id == index) {
1994 			hdev = hci_dev_hold(d);
1995 			break;
1996 		}
1997 	}
1998 	read_unlock(&hci_dev_list_lock);
1999 	return hdev;
2000 }
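/* Illustrative sketch (not part of the original file): the usual caller
 * pattern for hci_dev_get(). The reference taken via hci_dev_hold() above
 * must be balanced with hci_dev_put() once the caller is done, exactly as
 * hci_inquiry() and the ioctl helpers below do. The index 0 is just an
 * example value.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *
 *	BT_DBG("%s found", hdev->name);
 *	hci_dev_put(hdev);
 */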
2001 
2002 /* ---- Inquiry support ---- */
2003 
2004 bool hci_discovery_active(struct hci_dev *hdev)
2005 {
2006 	struct discovery_state *discov = &hdev->discovery;
2007 
2008 	switch (discov->state) {
2009 	case DISCOVERY_FINDING:
2010 	case DISCOVERY_RESOLVING:
2011 		return true;
2012 
2013 	default:
2014 		return false;
2015 	}
2016 }
2017 
2018 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2019 {
2020 	int old_state = hdev->discovery.state;
2021 
2022 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2023 
2024 	if (old_state == state)
2025 		return;
2026 
2027 	hdev->discovery.state = state;
2028 
2029 	switch (state) {
2030 	case DISCOVERY_STOPPED:
2031 		hci_update_background_scan(hdev);
2032 
2033 		if (old_state != DISCOVERY_STARTING)
2034 			mgmt_discovering(hdev, 0);
2035 		break;
2036 	case DISCOVERY_STARTING:
2037 		break;
2038 	case DISCOVERY_FINDING:
2039 		mgmt_discovering(hdev, 1);
2040 		break;
2041 	case DISCOVERY_RESOLVING:
2042 		break;
2043 	case DISCOVERY_STOPPING:
2044 		break;
2045 	}
2046 }
2047 
2048 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2049 {
2050 	struct discovery_state *cache = &hdev->discovery;
2051 	struct inquiry_entry *p, *n;
2052 
2053 	list_for_each_entry_safe(p, n, &cache->all, all) {
2054 		list_del(&p->all);
2055 		kfree(p);
2056 	}
2057 
2058 	INIT_LIST_HEAD(&cache->unknown);
2059 	INIT_LIST_HEAD(&cache->resolve);
2060 }
2061 
2062 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2063 					       bdaddr_t *bdaddr)
2064 {
2065 	struct discovery_state *cache = &hdev->discovery;
2066 	struct inquiry_entry *e;
2067 
2068 	BT_DBG("cache %p, %pMR", cache, bdaddr);
2069 
2070 	list_for_each_entry(e, &cache->all, all) {
2071 		if (!bacmp(&e->data.bdaddr, bdaddr))
2072 			return e;
2073 	}
2074 
2075 	return NULL;
2076 }
2077 
2078 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2079 						       bdaddr_t *bdaddr)
2080 {
2081 	struct discovery_state *cache = &hdev->discovery;
2082 	struct inquiry_entry *e;
2083 
2084 	BT_DBG("cache %p, %pMR", cache, bdaddr);
2085 
2086 	list_for_each_entry(e, &cache->unknown, list) {
2087 		if (!bacmp(&e->data.bdaddr, bdaddr))
2088 			return e;
2089 	}
2090 
2091 	return NULL;
2092 }
2093 
2094 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2095 						       bdaddr_t *bdaddr,
2096 						       int state)
2097 {
2098 	struct discovery_state *cache = &hdev->discovery;
2099 	struct inquiry_entry *e;
2100 
2101 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2102 
2103 	list_for_each_entry(e, &cache->resolve, list) {
2104 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2105 			return e;
2106 		if (!bacmp(&e->data.bdaddr, bdaddr))
2107 			return e;
2108 	}
2109 
2110 	return NULL;
2111 }
2112 
2113 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2114 				      struct inquiry_entry *ie)
2115 {
2116 	struct discovery_state *cache = &hdev->discovery;
2117 	struct list_head *pos = &cache->resolve;
2118 	struct inquiry_entry *p;
2119 
2120 	list_del(&ie->list);
2121 
2122 	list_for_each_entry(p, &cache->resolve, list) {
2123 		if (p->name_state != NAME_PENDING &&
2124 		    abs(p->data.rssi) >= abs(ie->data.rssi))
2125 			break;
2126 		pos = &p->list;
2127 	}
2128 
2129 	list_add(&ie->list, pos);
2130 }
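/* Note (not part of the original file): the loop above keeps the resolve
 * list sorted by ascending |RSSI|, i.e. strongest signal first, while
 * skipping entries whose name resolution is already pending, so remote
 * names get resolved for the best-heard devices first.
 */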
2131 
2132 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2133 			     bool name_known)
2134 {
2135 	struct discovery_state *cache = &hdev->discovery;
2136 	struct inquiry_entry *ie;
2137 	u32 flags = 0;
2138 
2139 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2140 
2141 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
2142 
2143 	if (!data->ssp_mode)
2144 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2145 
2146 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2147 	if (ie) {
2148 		if (!ie->data.ssp_mode)
2149 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2150 
2151 		if (ie->name_state == NAME_NEEDED &&
2152 		    data->rssi != ie->data.rssi) {
2153 			ie->data.rssi = data->rssi;
2154 			hci_inquiry_cache_update_resolve(hdev, ie);
2155 		}
2156 
2157 		goto update;
2158 	}
2159 
2160 	/* Entry not in the cache. Add new one. */
2161 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2162 	if (!ie) {
2163 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2164 		goto done;
2165 	}
2166 
2167 	list_add(&ie->all, &cache->all);
2168 
2169 	if (name_known) {
2170 		ie->name_state = NAME_KNOWN;
2171 	} else {
2172 		ie->name_state = NAME_NOT_KNOWN;
2173 		list_add(&ie->list, &cache->unknown);
2174 	}
2175 
2176 update:
2177 	if (name_known && ie->name_state != NAME_KNOWN &&
2178 	    ie->name_state != NAME_PENDING) {
2179 		ie->name_state = NAME_KNOWN;
2180 		list_del(&ie->list);
2181 	}
2182 
2183 	memcpy(&ie->data, data, sizeof(*data));
2184 	ie->timestamp = jiffies;
2185 	cache->timestamp = jiffies;
2186 
2187 	if (ie->name_state == NAME_NOT_KNOWN)
2188 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2189 
2190 done:
2191 	return flags;
2192 }
2193 
2194 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2195 {
2196 	struct discovery_state *cache = &hdev->discovery;
2197 	struct inquiry_info *info = (struct inquiry_info *) buf;
2198 	struct inquiry_entry *e;
2199 	int copied = 0;
2200 
2201 	list_for_each_entry(e, &cache->all, all) {
2202 		struct inquiry_data *data = &e->data;
2203 
2204 		if (copied >= num)
2205 			break;
2206 
2207 		bacpy(&info->bdaddr, &data->bdaddr);
2208 		info->pscan_rep_mode	= data->pscan_rep_mode;
2209 		info->pscan_period_mode	= data->pscan_period_mode;
2210 		info->pscan_mode	= data->pscan_mode;
2211 		memcpy(info->dev_class, data->dev_class, 3);
2212 		info->clock_offset	= data->clock_offset;
2213 
2214 		info++;
2215 		copied++;
2216 	}
2217 
2218 	BT_DBG("cache %p, copied %d", cache, copied);
2219 	return copied;
2220 }
2221 
2222 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2223 {
2224 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2225 	struct hci_dev *hdev = req->hdev;
2226 	struct hci_cp_inquiry cp;
2227 
2228 	BT_DBG("%s", hdev->name);
2229 
2230 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2231 		return;
2232 
2233 	/* Start Inquiry */
2234 	memcpy(&cp.lap, &ir->lap, 3);
2235 	cp.length  = ir->length;
2236 	cp.num_rsp = ir->num_rsp;
2237 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2238 }
2239 
2240 int hci_inquiry(void __user *arg)
2241 {
2242 	__u8 __user *ptr = arg;
2243 	struct hci_inquiry_req ir;
2244 	struct hci_dev *hdev;
2245 	int err = 0, do_inquiry = 0, max_rsp;
2246 	long timeo;
2247 	__u8 *buf;
2248 
2249 	if (copy_from_user(&ir, ptr, sizeof(ir)))
2250 		return -EFAULT;
2251 
2252 	hdev = hci_dev_get(ir.dev_id);
2253 	if (!hdev)
2254 		return -ENODEV;
2255 
2256 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 		err = -EBUSY;
2258 		goto done;
2259 	}
2260 
2261 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2262 		err = -EOPNOTSUPP;
2263 		goto done;
2264 	}
2265 
2266 	if (hdev->dev_type != HCI_BREDR) {
2267 		err = -EOPNOTSUPP;
2268 		goto done;
2269 	}
2270 
2271 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2272 		err = -EOPNOTSUPP;
2273 		goto done;
2274 	}
2275 
2276 	hci_dev_lock(hdev);
2277 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2278 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2279 		hci_inquiry_cache_flush(hdev);
2280 		do_inquiry = 1;
2281 	}
2282 	hci_dev_unlock(hdev);
2283 
2284 	timeo = ir.length * msecs_to_jiffies(2000);
2285 
2286 	if (do_inquiry) {
2287 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2288 				   timeo);
2289 		if (err < 0)
2290 			goto done;
2291 
2292 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2293 		 * cleared). If it is interrupted by a signal, return -EINTR.
2294 		 */
2295 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2296 				TASK_INTERRUPTIBLE))
2297 			return -EINTR;
2298 	}
2299 
2300 	/* For an unlimited number of responses, use a buffer with
2301 	 * 255 entries.
2302 	 */
2303 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2304 
2305 	/* cache_dump can't sleep, so allocate a temporary buffer and
2306 	 * then copy it to user space.
2307 	 */
2308 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2309 	if (!buf) {
2310 		err = -ENOMEM;
2311 		goto done;
2312 	}
2313 
2314 	hci_dev_lock(hdev);
2315 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2316 	hci_dev_unlock(hdev);
2317 
2318 	BT_DBG("num_rsp %d", ir.num_rsp);
2319 
2320 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2321 		ptr += sizeof(ir);
2322 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2323 				 ir.num_rsp))
2324 			err = -EFAULT;
2325 	} else
2326 		err = -EFAULT;
2327 
2328 	kfree(buf);
2329 
2330 done:
2331 	hci_dev_put(hdev);
2332 	return err;
2333 }
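/* Illustrative sketch (not part of the original file) of the userspace
 * side of the HCIINQUIRY ioctl handled above. The kernel expects a
 * struct hci_inquiry_req immediately followed by room for the inquiry_info
 * responses. The socket setup and the chosen values are assumptions based
 * on typical BlueZ usage, not taken from this file.
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;			// General Inquiry Access Code
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// inquiry length
 *	ir->num_rsp = 0;			// 0 means "up to 255 responses"
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) < 0)
 *		perror("HCIINQUIRY");
 */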
2334 
2335 static int hci_dev_do_open(struct hci_dev *hdev)
2336 {
2337 	int ret = 0;
2338 
2339 	BT_DBG("%s %p", hdev->name, hdev);
2340 
2341 	hci_req_lock(hdev);
2342 
2343 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2344 		ret = -ENODEV;
2345 		goto done;
2346 	}
2347 
2348 	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2349 	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2350 		/* Check for rfkill but allow the HCI setup stage to
2351 		 * proceed (which in itself doesn't cause any RF activity).
2352 		 */
2353 		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2354 			ret = -ERFKILL;
2355 			goto done;
2356 		}
2357 
2358 		/* Check for valid public address or a configured static
2359 	 * random address, but let the HCI setup proceed to
2360 		 * be able to determine if there is a public address
2361 		 * or not.
2362 		 *
2363 		 * In case of user channel usage, it is not important
2364 		 * if a public address or static random address is
2365 		 * available.
2366 		 *
2367 		 * This check is only valid for BR/EDR controllers
2368 		 * since AMP controllers do not have an address.
2369 		 */
2370 		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2371 		    hdev->dev_type == HCI_BREDR &&
2372 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2373 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2374 			ret = -EADDRNOTAVAIL;
2375 			goto done;
2376 		}
2377 	}
2378 
2379 	if (test_bit(HCI_UP, &hdev->flags)) {
2380 		ret = -EALREADY;
2381 		goto done;
2382 	}
2383 
2384 	if (hdev->open(hdev)) {
2385 		ret = -EIO;
2386 		goto done;
2387 	}
2388 
2389 	atomic_set(&hdev->cmd_cnt, 1);
2390 	set_bit(HCI_INIT, &hdev->flags);
2391 
2392 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2393 		if (hdev->setup)
2394 			ret = hdev->setup(hdev);
2395 
2396 		/* The transport driver can set these quirks before
2397 		 * creating the HCI device or in its setup callback.
2398 		 *
2399 		 * In case any of them is set, the controller has to
2400 		 * start up as unconfigured.
2401 		 */
2402 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2403 		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2404 			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2405 
2406 		/* For an unconfigured controller it is required to
2407 		 * read at least the version information provided by
2408 		 * the Read Local Version Information command.
2409 		 *
2410 		 * If the set_bdaddr driver callback is provided, then
2411 		 * also the original Bluetooth public device address
2412 		 * will be read using the Read BD Address command.
2413 		 */
2414 		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2415 			ret = __hci_unconf_init(hdev);
2416 	}
2417 
2418 	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2419 		/* If public address change is configured, ensure that
2420 		 * the address gets programmed. If the driver does not
2421 		 * support changing the public address, fail the power
2422 		 * on procedure.
2423 		 */
2424 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2425 		    hdev->set_bdaddr)
2426 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2427 		else
2428 			ret = -EADDRNOTAVAIL;
2429 	}
2430 
2431 	if (!ret) {
2432 		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2433 		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2434 			ret = __hci_init(hdev);
2435 	}
2436 
2437 	clear_bit(HCI_INIT, &hdev->flags);
2438 
2439 	if (!ret) {
2440 		hci_dev_hold(hdev);
2441 		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2442 		set_bit(HCI_UP, &hdev->flags);
2443 		hci_notify(hdev, HCI_DEV_UP);
2444 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2445 		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2446 		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2447 		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2448 		    hdev->dev_type == HCI_BREDR) {
2449 			hci_dev_lock(hdev);
2450 			mgmt_powered(hdev, 1);
2451 			hci_dev_unlock(hdev);
2452 		}
2453 	} else {
2454 		/* Init failed, cleanup */
2455 		flush_work(&hdev->tx_work);
2456 		flush_work(&hdev->cmd_work);
2457 		flush_work(&hdev->rx_work);
2458 
2459 		skb_queue_purge(&hdev->cmd_q);
2460 		skb_queue_purge(&hdev->rx_q);
2461 
2462 		if (hdev->flush)
2463 			hdev->flush(hdev);
2464 
2465 		if (hdev->sent_cmd) {
2466 			kfree_skb(hdev->sent_cmd);
2467 			hdev->sent_cmd = NULL;
2468 		}
2469 
2470 		hdev->close(hdev);
2471 		hdev->flags &= BIT(HCI_RAW);
2472 	}
2473 
2474 done:
2475 	hci_req_unlock(hdev);
2476 	return ret;
2477 }
2478 
2479 /* ---- HCI ioctl helpers ---- */
2480 
2481 int hci_dev_open(__u16 dev)
2482 {
2483 	struct hci_dev *hdev;
2484 	int err;
2485 
2486 	hdev = hci_dev_get(dev);
2487 	if (!hdev)
2488 		return -ENODEV;
2489 
2490 	/* Devices that are marked as unconfigured can only be powered
2491 	 * up as user channel. Trying to bring them up as normal devices
2492 	 * will result in a failure. Only user channel operation is
2493 	 * possible.
2494 	 *
2495 	 * When this function is called for a user channel, the flag
2496 	 * HCI_USER_CHANNEL will be set first before attempting to
2497 	 * open the device.
2498 	 */
2499 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2500 	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501 		err = -EOPNOTSUPP;
2502 		goto done;
2503 	}
2504 
2505 	/* We need to ensure that no other power on/off work is pending
2506 	 * before proceeding to call hci_dev_do_open. This is
2507 	 * particularly important if the setup procedure has not yet
2508 	 * completed.
2509 	 */
2510 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2511 		cancel_delayed_work(&hdev->power_off);
2512 
2513 	/* After this call it is guaranteed that the setup procedure
2514 	 * has finished. This means that error conditions like RFKILL
2515 	 * or no valid public or static random address apply.
2516 	 */
2517 	flush_workqueue(hdev->req_workqueue);
2518 
2519 	/* For controllers not using the management interface and that
2520 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2521 	 * so that pairing works for them. Once the management interface
2522 	 * is in use this bit will be cleared again and userspace has
2523 	 * to explicitly enable it.
2524 	 */
2525 	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2526 	    !test_bit(HCI_MGMT, &hdev->dev_flags))
2527 		set_bit(HCI_BONDABLE, &hdev->dev_flags);
2528 
2529 	err = hci_dev_do_open(hdev);
2530 
2531 done:
2532 	hci_dev_put(hdev);
2533 	return err;
2534 }
2535 
2536 /* This function requires the caller holds hdev->lock */
2537 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2538 {
2539 	struct hci_conn_params *p;
2540 
2541 	list_for_each_entry(p, &hdev->le_conn_params, list) {
2542 		if (p->conn) {
2543 			hci_conn_drop(p->conn);
2544 			hci_conn_put(p->conn);
2545 			p->conn = NULL;
2546 		}
2547 		list_del_init(&p->action);
2548 	}
2549 
2550 	BT_DBG("All LE pending actions cleared");
2551 }
2552 
2553 static int hci_dev_do_close(struct hci_dev *hdev)
2554 {
2555 	BT_DBG("%s %p", hdev->name, hdev);
2556 
2557 	cancel_delayed_work(&hdev->power_off);
2558 
2559 	hci_req_cancel(hdev, ENODEV);
2560 	hci_req_lock(hdev);
2561 
2562 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2563 		cancel_delayed_work_sync(&hdev->cmd_timer);
2564 		hci_req_unlock(hdev);
2565 		return 0;
2566 	}
2567 
2568 	/* Flush RX and TX works */
2569 	flush_work(&hdev->tx_work);
2570 	flush_work(&hdev->rx_work);
2571 
2572 	if (hdev->discov_timeout > 0) {
2573 		cancel_delayed_work(&hdev->discov_off);
2574 		hdev->discov_timeout = 0;
2575 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2576 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2577 	}
2578 
2579 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2580 		cancel_delayed_work(&hdev->service_cache);
2581 
2582 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2583 
2584 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
2585 		cancel_delayed_work_sync(&hdev->rpa_expired);
2586 
2587 	hci_dev_lock(hdev);
2588 	hci_inquiry_cache_flush(hdev);
2589 	hci_pend_le_actions_clear(hdev);
2590 	hci_conn_hash_flush(hdev);
2591 	hci_dev_unlock(hdev);
2592 
2593 	hci_notify(hdev, HCI_DEV_DOWN);
2594 
2595 	if (hdev->flush)
2596 		hdev->flush(hdev);
2597 
2598 	/* Reset device */
2599 	skb_queue_purge(&hdev->cmd_q);
2600 	atomic_set(&hdev->cmd_cnt, 1);
2601 	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2602 	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2603 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2604 		set_bit(HCI_INIT, &hdev->flags);
2605 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2606 		clear_bit(HCI_INIT, &hdev->flags);
2607 	}
2608 
2609 	/* Flush cmd work */
2610 	flush_work(&hdev->cmd_work);
2611 
2612 	/* Drop queues */
2613 	skb_queue_purge(&hdev->rx_q);
2614 	skb_queue_purge(&hdev->cmd_q);
2615 	skb_queue_purge(&hdev->raw_q);
2616 
2617 	/* Drop last sent command */
2618 	if (hdev->sent_cmd) {
2619 		cancel_delayed_work_sync(&hdev->cmd_timer);
2620 		kfree_skb(hdev->sent_cmd);
2621 		hdev->sent_cmd = NULL;
2622 	}
2623 
2624 	kfree_skb(hdev->recv_evt);
2625 	hdev->recv_evt = NULL;
2626 
2627 	/* After this point our queues are empty
2628 	 * and no tasks are scheduled. */
2629 	hdev->close(hdev);
2630 
2631 	/* Clear flags */
2632 	hdev->flags &= BIT(HCI_RAW);
2633 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2634 
2635 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2636 		if (hdev->dev_type == HCI_BREDR) {
2637 			hci_dev_lock(hdev);
2638 			mgmt_powered(hdev, 0);
2639 			hci_dev_unlock(hdev);
2640 		}
2641 	}
2642 
2643 	/* Controller radio is available but is currently powered down */
2644 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2645 
2646 	memset(hdev->eir, 0, sizeof(hdev->eir));
2647 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2648 	bacpy(&hdev->random_addr, BDADDR_ANY);
2649 
2650 	hci_req_unlock(hdev);
2651 
2652 	hci_dev_put(hdev);
2653 	return 0;
2654 }
2655 
2656 int hci_dev_close(__u16 dev)
2657 {
2658 	struct hci_dev *hdev;
2659 	int err;
2660 
2661 	hdev = hci_dev_get(dev);
2662 	if (!hdev)
2663 		return -ENODEV;
2664 
2665 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2666 		err = -EBUSY;
2667 		goto done;
2668 	}
2669 
2670 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2671 		cancel_delayed_work(&hdev->power_off);
2672 
2673 	err = hci_dev_do_close(hdev);
2674 
2675 done:
2676 	hci_dev_put(hdev);
2677 	return err;
2678 }
2679 
2680 int hci_dev_reset(__u16 dev)
2681 {
2682 	struct hci_dev *hdev;
2683 	int ret = 0;
2684 
2685 	hdev = hci_dev_get(dev);
2686 	if (!hdev)
2687 		return -ENODEV;
2688 
2689 	hci_req_lock(hdev);
2690 
2691 	if (!test_bit(HCI_UP, &hdev->flags)) {
2692 		ret = -ENETDOWN;
2693 		goto done;
2694 	}
2695 
2696 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2697 		ret = -EBUSY;
2698 		goto done;
2699 	}
2700 
2701 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2702 		ret = -EOPNOTSUPP;
2703 		goto done;
2704 	}
2705 
2706 	/* Drop queues */
2707 	skb_queue_purge(&hdev->rx_q);
2708 	skb_queue_purge(&hdev->cmd_q);
2709 
2710 	hci_dev_lock(hdev);
2711 	hci_inquiry_cache_flush(hdev);
2712 	hci_conn_hash_flush(hdev);
2713 	hci_dev_unlock(hdev);
2714 
2715 	if (hdev->flush)
2716 		hdev->flush(hdev);
2717 
2718 	atomic_set(&hdev->cmd_cnt, 1);
2719 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2720 
2721 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2722 
2723 done:
2724 	hci_req_unlock(hdev);
2725 	hci_dev_put(hdev);
2726 	return ret;
2727 }
2728 
2729 int hci_dev_reset_stat(__u16 dev)
2730 {
2731 	struct hci_dev *hdev;
2732 	int ret = 0;
2733 
2734 	hdev = hci_dev_get(dev);
2735 	if (!hdev)
2736 		return -ENODEV;
2737 
2738 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739 		ret = -EBUSY;
2740 		goto done;
2741 	}
2742 
2743 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2744 		ret = -EOPNOTSUPP;
2745 		goto done;
2746 	}
2747 
2748 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2749 
2750 done:
2751 	hci_dev_put(hdev);
2752 	return ret;
2753 }
2754 
2755 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2756 {
2757 	bool conn_changed, discov_changed;
2758 
2759 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
2760 
2761 	if ((scan & SCAN_PAGE))
2762 		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2763 						 &hdev->dev_flags);
2764 	else
2765 		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2766 						  &hdev->dev_flags);
2767 
2768 	if ((scan & SCAN_INQUIRY)) {
2769 		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2770 						   &hdev->dev_flags);
2771 	} else {
2772 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2773 		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2774 						    &hdev->dev_flags);
2775 	}
2776 
2777 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2778 		return;
2779 
2780 	if (conn_changed || discov_changed) {
2781 		/* In case this was disabled through mgmt */
2782 		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2783 
2784 		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2785 			mgmt_update_adv_data(hdev);
2786 
2787 		mgmt_new_settings(hdev);
2788 	}
2789 }
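/* Note (not part of the original file): the scan values handled above are
 * the standard Write Scan Enable bits, so a HCISETSCAN request maps onto
 * the mgmt flags as follows:
 *
 *	SCAN_PAGE set     -> HCI_CONNECTABLE set, otherwise cleared
 *	SCAN_INQUIRY set  -> HCI_DISCOVERABLE set, otherwise cleared
 *	                     (clearing it also drops HCI_LIMITED_DISCOVERABLE)
 */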
2790 
2791 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2792 {
2793 	struct hci_dev *hdev;
2794 	struct hci_dev_req dr;
2795 	int err = 0;
2796 
2797 	if (copy_from_user(&dr, arg, sizeof(dr)))
2798 		return -EFAULT;
2799 
2800 	hdev = hci_dev_get(dr.dev_id);
2801 	if (!hdev)
2802 		return -ENODEV;
2803 
2804 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2805 		err = -EBUSY;
2806 		goto done;
2807 	}
2808 
2809 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2810 		err = -EOPNOTSUPP;
2811 		goto done;
2812 	}
2813 
2814 	if (hdev->dev_type != HCI_BREDR) {
2815 		err = -EOPNOTSUPP;
2816 		goto done;
2817 	}
2818 
2819 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2820 		err = -EOPNOTSUPP;
2821 		goto done;
2822 	}
2823 
2824 	switch (cmd) {
2825 	case HCISETAUTH:
2826 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2827 				   HCI_INIT_TIMEOUT);
2828 		break;
2829 
2830 	case HCISETENCRYPT:
2831 		if (!lmp_encrypt_capable(hdev)) {
2832 			err = -EOPNOTSUPP;
2833 			break;
2834 		}
2835 
2836 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2837 			/* Auth must be enabled first */
2838 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2839 					   HCI_INIT_TIMEOUT);
2840 			if (err)
2841 				break;
2842 		}
2843 
2844 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2845 				   HCI_INIT_TIMEOUT);
2846 		break;
2847 
2848 	case HCISETSCAN:
2849 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2850 				   HCI_INIT_TIMEOUT);
2851 
2852 		/* Ensure that the connectable and discoverable states
2853 		 * get correctly modified as this was a non-mgmt change.
2854 		 */
2855 		if (!err)
2856 			hci_update_scan_state(hdev, dr.dev_opt);
2857 		break;
2858 
2859 	case HCISETLINKPOL:
2860 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2861 				   HCI_INIT_TIMEOUT);
2862 		break;
2863 
2864 	case HCISETLINKMODE:
2865 		hdev->link_mode = ((__u16) dr.dev_opt) &
2866 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
2867 		break;
2868 
2869 	case HCISETPTYPE:
2870 		hdev->pkt_type = (__u16) dr.dev_opt;
2871 		break;
2872 
2873 	case HCISETACLMTU:
2874 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2875 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2876 		break;
2877 
2878 	case HCISETSCOMTU:
2879 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2880 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2881 		break;
2882 
2883 	default:
2884 		err = -EINVAL;
2885 		break;
2886 	}
2887 
2888 done:
2889 	hci_dev_put(hdev);
2890 	return err;
2891 }
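/* Illustrative sketch (not part of the original file): HCISETACLMTU and
 * HCISETSCOMTU above split the 32-bit dev_opt word by memory position,
 * taking the packet count from the first 16-bit half and the MTU from the
 * second. Userspace therefore has to pack dev_opt with the same pointer
 * arithmetic rather than with shifts; dd, mtu and pkts are placeholders.
 *
 *	struct hci_dev_req dr;
 *
 *	memset(&dr, 0, sizeof(dr));
 *	dr.dev_id = 0;
 *	*((__u16 *) &dr.dev_opt + 0) = pkts;	// packet count
 *	*((__u16 *) &dr.dev_opt + 1) = mtu;	// MTU
 *	if (ioctl(dd, HCISETACLMTU, &dr) < 0)
 *		perror("HCISETACLMTU");
 */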
2892 
2893 int hci_get_dev_list(void __user *arg)
2894 {
2895 	struct hci_dev *hdev;
2896 	struct hci_dev_list_req *dl;
2897 	struct hci_dev_req *dr;
2898 	int n = 0, size, err;
2899 	__u16 dev_num;
2900 
2901 	if (get_user(dev_num, (__u16 __user *) arg))
2902 		return -EFAULT;
2903 
2904 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2905 		return -EINVAL;
2906 
2907 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2908 
2909 	dl = kzalloc(size, GFP_KERNEL);
2910 	if (!dl)
2911 		return -ENOMEM;
2912 
2913 	dr = dl->dev_req;
2914 
2915 	read_lock(&hci_dev_list_lock);
2916 	list_for_each_entry(hdev, &hci_dev_list, list) {
2917 		unsigned long flags = hdev->flags;
2918 
2919 		/* When the auto-off is configured it means the transport
2920 		 * is running, but in that case still indicate that the
2921 		 * device is actually down.
2922 		 */
2923 		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2924 			flags &= ~BIT(HCI_UP);
2925 
2926 		(dr + n)->dev_id  = hdev->id;
2927 		(dr + n)->dev_opt = flags;
2928 
2929 		if (++n >= dev_num)
2930 			break;
2931 	}
2932 	read_unlock(&hci_dev_list_lock);
2933 
2934 	dl->dev_num = n;
2935 	size = sizeof(*dl) + n * sizeof(*dr);
2936 
2937 	err = copy_to_user(arg, dl, size);
2938 	kfree(dl);
2939 
2940 	return err ? -EFAULT : 0;
2941 }
2942 
2943 int hci_get_dev_info(void __user *arg)
2944 {
2945 	struct hci_dev *hdev;
2946 	struct hci_dev_info di;
2947 	unsigned long flags;
2948 	int err = 0;
2949 
2950 	if (copy_from_user(&di, arg, sizeof(di)))
2951 		return -EFAULT;
2952 
2953 	hdev = hci_dev_get(di.dev_id);
2954 	if (!hdev)
2955 		return -ENODEV;
2956 
2957 	/* When the auto-off is configured it means the transport
2958 	 * is running, but in that case still indicate that the
2959 	 * device is actually down.
2960 	 */
2961 	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2962 		flags = hdev->flags & ~BIT(HCI_UP);
2963 	else
2964 		flags = hdev->flags;
2965 
2966 	strcpy(di.name, hdev->name);
2967 	di.bdaddr   = hdev->bdaddr;
2968 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2969 	di.flags    = flags;
2970 	di.pkt_type = hdev->pkt_type;
2971 	if (lmp_bredr_capable(hdev)) {
2972 		di.acl_mtu  = hdev->acl_mtu;
2973 		di.acl_pkts = hdev->acl_pkts;
2974 		di.sco_mtu  = hdev->sco_mtu;
2975 		di.sco_pkts = hdev->sco_pkts;
2976 	} else {
2977 		di.acl_mtu  = hdev->le_mtu;
2978 		di.acl_pkts = hdev->le_pkts;
2979 		di.sco_mtu  = 0;
2980 		di.sco_pkts = 0;
2981 	}
2982 	di.link_policy = hdev->link_policy;
2983 	di.link_mode   = hdev->link_mode;
2984 
2985 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2986 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2987 
2988 	if (copy_to_user(arg, &di, sizeof(di)))
2989 		err = -EFAULT;
2990 
2991 	hci_dev_put(hdev);
2992 
2993 	return err;
2994 }
2995 
2996 /* ---- Interface to HCI drivers ---- */
2997 
2998 static int hci_rfkill_set_block(void *data, bool blocked)
2999 {
3000 	struct hci_dev *hdev = data;
3001 
3002 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3003 
3004 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3005 		return -EBUSY;
3006 
3007 	if (blocked) {
3008 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
3009 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3010 		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
3011 			hci_dev_do_close(hdev);
3012 	} else {
3013 		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3014 	}
3015 
3016 	return 0;
3017 }
3018 
3019 static const struct rfkill_ops hci_rfkill_ops = {
3020 	.set_block = hci_rfkill_set_block,
3021 };
3022 
3023 static void hci_power_on(struct work_struct *work)
3024 {
3025 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3026 	int err;
3027 
3028 	BT_DBG("%s", hdev->name);
3029 
3030 	err = hci_dev_do_open(hdev);
3031 	if (err < 0) {
3032 		mgmt_set_powered_failed(hdev, err);
3033 		return;
3034 	}
3035 
3036 	/* During the HCI setup phase, a few error conditions are
3037 	 * ignored and they need to be checked now. If they are still
3038 	 * valid, it is important to turn the device back off.
3039 	 */
3040 	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3041 	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3042 	    (hdev->dev_type == HCI_BREDR &&
3043 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3044 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3045 		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3046 		hci_dev_do_close(hdev);
3047 	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3048 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3049 				   HCI_AUTO_OFF_TIMEOUT);
3050 	}
3051 
3052 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3053 		/* For unconfigured devices, set the HCI_RAW flag
3054 		 * so that userspace can easily identify them.
3055 		 */
3056 		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3057 			set_bit(HCI_RAW, &hdev->flags);
3058 
3059 		/* For fully configured devices, this will send
3060 		 * the Index Added event. For unconfigured devices,
3061 		 * it will send the Unconfigured Index Added event.
3062 		 *
3063 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3064 		 * and no event will be sent.
3065 		 */
3066 		mgmt_index_added(hdev);
3067 	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3068 		/* When the controller is now configured, then it
3069 		 * is important to clear the HCI_RAW flag.
3070 		 */
3071 		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3072 			clear_bit(HCI_RAW, &hdev->flags);
3073 
3074 		/* Powering on the controller with HCI_CONFIG set only
3075 		 * happens with the transition from unconfigured to
3076 		 * configured. This will send the Index Added event.
3077 		 */
3078 		mgmt_index_added(hdev);
3079 	}
3080 }
3081 
3082 static void hci_power_off(struct work_struct *work)
3083 {
3084 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3085 					    power_off.work);
3086 
3087 	BT_DBG("%s", hdev->name);
3088 
3089 	hci_dev_do_close(hdev);
3090 }
3091 
3092 static void hci_discov_off(struct work_struct *work)
3093 {
3094 	struct hci_dev *hdev;
3095 
3096 	hdev = container_of(work, struct hci_dev, discov_off.work);
3097 
3098 	BT_DBG("%s", hdev->name);
3099 
3100 	mgmt_discoverable_timeout(hdev);
3101 }
3102 
3103 void hci_uuids_clear(struct hci_dev *hdev)
3104 {
3105 	struct bt_uuid *uuid, *tmp;
3106 
3107 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3108 		list_del(&uuid->list);
3109 		kfree(uuid);
3110 	}
3111 }
3112 
3113 void hci_link_keys_clear(struct hci_dev *hdev)
3114 {
3115 	struct list_head *p, *n;
3116 
3117 	list_for_each_safe(p, n, &hdev->link_keys) {
3118 		struct link_key *key;
3119 
3120 		key = list_entry(p, struct link_key, list);
3121 
3122 		list_del(p);
3123 		kfree(key);
3124 	}
3125 }
3126 
3127 void hci_smp_ltks_clear(struct hci_dev *hdev)
3128 {
3129 	struct smp_ltk *k, *tmp;
3130 
3131 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3132 		list_del(&k->list);
3133 		kfree(k);
3134 	}
3135 }
3136 
3137 void hci_smp_irks_clear(struct hci_dev *hdev)
3138 {
3139 	struct smp_irk *k, *tmp;
3140 
3141 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3142 		list_del(&k->list);
3143 		kfree(k);
3144 	}
3145 }
3146 
3147 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3148 {
3149 	struct link_key *k;
3150 
3151 	list_for_each_entry(k, &hdev->link_keys, list)
3152 		if (bacmp(bdaddr, &k->bdaddr) == 0)
3153 			return k;
3154 
3155 	return NULL;
3156 }
3157 
3158 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3159 			       u8 key_type, u8 old_key_type)
3160 {
3161 	/* Legacy key */
3162 	if (key_type < 0x03)
3163 		return true;
3164 
3165 	/* Debug keys are insecure so don't store them persistently */
3166 	if (key_type == HCI_LK_DEBUG_COMBINATION)
3167 		return false;
3168 
3169 	/* Changed combination key and there's no previous one */
3170 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3171 		return false;
3172 
3173 	/* Security mode 3 case */
3174 	if (!conn)
3175 		return true;
3176 
3177 	/* Both the local and the remote side required some form of bonding */
3178 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3179 		return true;
3180 
3181 	/* Local side had dedicated bonding as requirement */
3182 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3183 		return true;
3184 
3185 	/* Remote side had dedicated bonding as requirement */
3186 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3187 		return true;
3188 
3189 	/* If none of the above criteria match, then don't store the key
3190 	 * persistently */
3191 	return false;
3192 }
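/* Note (not part of the original file): summarizing the checks above, a
 * link key is stored persistently when it is a legacy key (type < 0x03),
 * when there is no connection (security mode 3), when both sides required
 * some form of bonding, or when either side required dedicated bonding.
 * Debug keys and spurious changed-combination keys are never stored.
 */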
3193 
3194 static u8 ltk_role(u8 type)
3195 {
3196 	if (type == SMP_LTK)
3197 		return HCI_ROLE_MASTER;
3198 
3199 	return HCI_ROLE_SLAVE;
3200 }
3201 
3202 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3203 			     u8 role)
3204 {
3205 	struct smp_ltk *k;
3206 
3207 	list_for_each_entry(k, &hdev->long_term_keys, list) {
3208 		if (k->ediv != ediv || k->rand != rand)
3209 			continue;
3210 
3211 		if (ltk_role(k->type) != role)
3212 			continue;
3213 
3214 		return k;
3215 	}
3216 
3217 	return NULL;
3218 }
3219 
3220 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3221 				     u8 addr_type, u8 role)
3222 {
3223 	struct smp_ltk *k;
3224 
3225 	list_for_each_entry(k, &hdev->long_term_keys, list)
3226 		if (addr_type == k->bdaddr_type &&
3227 		    bacmp(bdaddr, &k->bdaddr) == 0 &&
3228 		    ltk_role(k->type) == role)
3229 			return k;
3230 
3231 	return NULL;
3232 }
3233 
3234 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3235 {
3236 	struct smp_irk *irk;
3237 
3238 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3239 		if (!bacmp(&irk->rpa, rpa))
3240 			return irk;
3241 	}
3242 
3243 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3244 		if (smp_irk_matches(hdev, irk->val, rpa)) {
3245 			bacpy(&irk->rpa, rpa);
3246 			return irk;
3247 		}
3248 	}
3249 
3250 	return NULL;
3251 }
3252 
3253 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 				     u8 addr_type)
3255 {
3256 	struct smp_irk *irk;
3257 
3258 	/* Identity Address must be public or static random */
3259 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3260 		return NULL;
3261 
3262 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3263 		if (addr_type == irk->addr_type &&
3264 		    bacmp(bdaddr, &irk->bdaddr) == 0)
3265 			return irk;
3266 	}
3267 
3268 	return NULL;
3269 }
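/* Note (not part of the original file): bdaddr_t is stored least
 * significant byte first, so b[5] is the most significant byte of the
 * address. Random addresses encode their sub-type in its two top bits,
 * which is what the check above relies on:
 *
 *	(b[5] & 0xc0) == 0xc0	static random address (valid identity address)
 *	(b[5] & 0xc0) == 0x40	resolvable private address
 *	(b[5] & 0xc0) == 0x00	non-resolvable private address
 */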
3270 
3271 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3272 				  bdaddr_t *bdaddr, u8 *val, u8 type,
3273 				  u8 pin_len, bool *persistent)
3274 {
3275 	struct link_key *key, *old_key;
3276 	u8 old_key_type;
3277 
3278 	old_key = hci_find_link_key(hdev, bdaddr);
3279 	if (old_key) {
3280 		old_key_type = old_key->type;
3281 		key = old_key;
3282 	} else {
3283 		old_key_type = conn ? conn->key_type : 0xff;
3284 		key = kzalloc(sizeof(*key), GFP_KERNEL);
3285 		if (!key)
3286 			return NULL;
3287 		list_add(&key->list, &hdev->link_keys);
3288 	}
3289 
3290 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3291 
3292 	/* Some buggy controller combinations generate a changed
3293 	 * combination key for legacy pairing even when there's no
3294 	 * previous key */
3295 	if (type == HCI_LK_CHANGED_COMBINATION &&
3296 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3297 		type = HCI_LK_COMBINATION;
3298 		if (conn)
3299 			conn->key_type = type;
3300 	}
3301 
3302 	bacpy(&key->bdaddr, bdaddr);
3303 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3304 	key->pin_len = pin_len;
3305 
3306 	if (type == HCI_LK_CHANGED_COMBINATION)
3307 		key->type = old_key_type;
3308 	else
3309 		key->type = type;
3310 
3311 	if (persistent)
3312 		*persistent = hci_persistent_key(hdev, conn, type,
3313 						 old_key_type);
3314 
3315 	return key;
3316 }
3317 
3318 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3319 			    u8 addr_type, u8 type, u8 authenticated,
3320 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3321 {
3322 	struct smp_ltk *key, *old_key;
3323 	u8 role = ltk_role(type);
3324 
3325 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3326 	if (old_key)
3327 		key = old_key;
3328 	else {
3329 		key = kzalloc(sizeof(*key), GFP_KERNEL);
3330 		if (!key)
3331 			return NULL;
3332 		list_add(&key->list, &hdev->long_term_keys);
3333 	}
3334 
3335 	bacpy(&key->bdaddr, bdaddr);
3336 	key->bdaddr_type = addr_type;
3337 	memcpy(key->val, tk, sizeof(key->val));
3338 	key->authenticated = authenticated;
3339 	key->ediv = ediv;
3340 	key->rand = rand;
3341 	key->enc_size = enc_size;
3342 	key->type = type;
3343 
3344 	return key;
3345 }
3346 
3347 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3348 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
3349 {
3350 	struct smp_irk *irk;
3351 
3352 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3353 	if (!irk) {
3354 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3355 		if (!irk)
3356 			return NULL;
3357 
3358 		bacpy(&irk->bdaddr, bdaddr);
3359 		irk->addr_type = addr_type;
3360 
3361 		list_add(&irk->list, &hdev->identity_resolving_keys);
3362 	}
3363 
3364 	memcpy(irk->val, val, 16);
3365 	bacpy(&irk->rpa, rpa);
3366 
3367 	return irk;
3368 }
3369 
3370 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3371 {
3372 	struct link_key *key;
3373 
3374 	key = hci_find_link_key(hdev, bdaddr);
3375 	if (!key)
3376 		return -ENOENT;
3377 
3378 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3379 
3380 	list_del(&key->list);
3381 	kfree(key);
3382 
3383 	return 0;
3384 }
3385 
3386 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3387 {
3388 	struct smp_ltk *k, *tmp;
3389 	int removed = 0;
3390 
3391 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3392 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3393 			continue;
3394 
3395 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3396 
3397 		list_del(&k->list);
3398 		kfree(k);
3399 		removed++;
3400 	}
3401 
3402 	return removed ? 0 : -ENOENT;
3403 }
3404 
3405 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3406 {
3407 	struct smp_irk *k, *tmp;
3408 
3409 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3410 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3411 			continue;
3412 
3413 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3414 
3415 		list_del(&k->list);
3416 		kfree(k);
3417 	}
3418 }
3419 
3420 /* HCI command timer function */
3421 static void hci_cmd_timeout(struct work_struct *work)
3422 {
3423 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3424 					    cmd_timer.work);
3425 
3426 	if (hdev->sent_cmd) {
3427 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3428 		u16 opcode = __le16_to_cpu(sent->opcode);
3429 
3430 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3431 	} else {
3432 		BT_ERR("%s command tx timeout", hdev->name);
3433 	}
3434 
3435 	atomic_set(&hdev->cmd_cnt, 1);
3436 	queue_work(hdev->workqueue, &hdev->cmd_work);
3437 }
3438 
3439 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3440 					  bdaddr_t *bdaddr)
3441 {
3442 	struct oob_data *data;
3443 
3444 	list_for_each_entry(data, &hdev->remote_oob_data, list)
3445 		if (bacmp(bdaddr, &data->bdaddr) == 0)
3446 			return data;
3447 
3448 	return NULL;
3449 }
3450 
3451 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3452 {
3453 	struct oob_data *data;
3454 
3455 	data = hci_find_remote_oob_data(hdev, bdaddr);
3456 	if (!data)
3457 		return -ENOENT;
3458 
3459 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3460 
3461 	list_del(&data->list);
3462 	kfree(data);
3463 
3464 	return 0;
3465 }
3466 
3467 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3468 {
3469 	struct oob_data *data, *n;
3470 
3471 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3472 		list_del(&data->list);
3473 		kfree(data);
3474 	}
3475 }
3476 
3477 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3478 			    u8 *hash, u8 *randomizer)
3479 {
3480 	struct oob_data *data;
3481 
3482 	data = hci_find_remote_oob_data(hdev, bdaddr);
3483 	if (!data) {
3484 		data = kmalloc(sizeof(*data), GFP_KERNEL);
3485 		if (!data)
3486 			return -ENOMEM;
3487 
3488 		bacpy(&data->bdaddr, bdaddr);
3489 		list_add(&data->list, &hdev->remote_oob_data);
3490 	}
3491 
3492 	memcpy(data->hash192, hash, sizeof(data->hash192));
3493 	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3494 
3495 	memset(data->hash256, 0, sizeof(data->hash256));
3496 	memset(data->randomizer256, 0, sizeof(data->randomizer256));
3497 
3498 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
3499 
3500 	return 0;
3501 }
3502 
3503 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3504 				u8 *hash192, u8 *randomizer192,
3505 				u8 *hash256, u8 *randomizer256)
3506 {
3507 	struct oob_data *data;
3508 
3509 	data = hci_find_remote_oob_data(hdev, bdaddr);
3510 	if (!data) {
3511 		data = kmalloc(sizeof(*data), GFP_KERNEL);
3512 		if (!data)
3513 			return -ENOMEM;
3514 
3515 		bacpy(&data->bdaddr, bdaddr);
3516 		list_add(&data->list, &hdev->remote_oob_data);
3517 	}
3518 
3519 	memcpy(data->hash192, hash192, sizeof(data->hash192));
3520 	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3521 
3522 	memcpy(data->hash256, hash256, sizeof(data->hash256));
3523 	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3524 
3525 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
3526 
3527 	return 0;
3528 }
3529 
3530 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3531 					 bdaddr_t *bdaddr, u8 type)
3532 {
3533 	struct bdaddr_list *b;
3534 
3535 	list_for_each_entry(b, bdaddr_list, list) {
3536 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3537 			return b;
3538 	}
3539 
3540 	return NULL;
3541 }
3542 
3543 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3544 {
3545 	struct list_head *p, *n;
3546 
3547 	list_for_each_safe(p, n, bdaddr_list) {
3548 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3549 
3550 		list_del(p);
3551 		kfree(b);
3552 	}
3553 }
3554 
3555 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3556 {
3557 	struct bdaddr_list *entry;
3558 
3559 	if (!bacmp(bdaddr, BDADDR_ANY))
3560 		return -EBADF;
3561 
3562 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3563 		return -EEXIST;
3564 
3565 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3566 	if (!entry)
3567 		return -ENOMEM;
3568 
3569 	bacpy(&entry->bdaddr, bdaddr);
3570 	entry->bdaddr_type = type;
3571 
3572 	list_add(&entry->list, list);
3573 
3574 	return 0;
3575 }
3576 
3577 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3578 {
3579 	struct bdaddr_list *entry;
3580 
3581 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3582 		hci_bdaddr_list_clear(list);
3583 		return 0;
3584 	}
3585 
3586 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3587 	if (!entry)
3588 		return -ENOENT;
3589 
3590 	list_del(&entry->list);
3591 	kfree(entry);
3592 
3593 	return 0;
3594 }
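/* Illustrative sketch (not part of the original file): the bdaddr list
 * helpers above back lists such as hdev->blacklist and hdev->whitelist
 * (the exact list used here is an example). Callers typically manipulate
 * them with hdev->lock held, e.g. to reject connections from a BR/EDR peer:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		BT_DBG("%s: %pMR already blacklisted", hdev->name, &bdaddr);
 *	hci_dev_unlock(hdev);
 */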
3595 
3596 /* This function requires the caller holds hdev->lock */
3597 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3598 					       bdaddr_t *addr, u8 addr_type)
3599 {
3600 	struct hci_conn_params *params;
3601 
3602 	/* The conn params list only contains identity addresses */
3603 	if (!hci_is_identity_address(addr, addr_type))
3604 		return NULL;
3605 
3606 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3607 		if (bacmp(&params->addr, addr) == 0 &&
3608 		    params->addr_type == addr_type) {
3609 			return params;
3610 		}
3611 	}
3612 
3613 	return NULL;
3614 }
3615 
3616 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3617 {
3618 	struct hci_conn *conn;
3619 
3620 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3621 	if (!conn)
3622 		return false;
3623 
3624 	if (conn->dst_type != type)
3625 		return false;
3626 
3627 	if (conn->state != BT_CONNECTED)
3628 		return false;
3629 
3630 	return true;
3631 }
3632 
3633 /* This function requires the caller holds hdev->lock */
3634 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3635 						  bdaddr_t *addr, u8 addr_type)
3636 {
3637 	struct hci_conn_params *param;
3638 
3639 	/* The list only contains identity addresses */
3640 	if (!hci_is_identity_address(addr, addr_type))
3641 		return NULL;
3642 
3643 	list_for_each_entry(param, list, action) {
3644 		if (bacmp(&param->addr, addr) == 0 &&
3645 		    param->addr_type == addr_type)
3646 			return param;
3647 	}
3648 
3649 	return NULL;
3650 }
3651 
3652 /* This function requires the caller holds hdev->lock */
3653 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3654 					    bdaddr_t *addr, u8 addr_type)
3655 {
3656 	struct hci_conn_params *params;
3657 
3658 	if (!hci_is_identity_address(addr, addr_type))
3659 		return NULL;
3660 
3661 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3662 	if (params)
3663 		return params;
3664 
3665 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3666 	if (!params) {
3667 		BT_ERR("Out of memory");
3668 		return NULL;
3669 	}
3670 
3671 	bacpy(&params->addr, addr);
3672 	params->addr_type = addr_type;
3673 
3674 	list_add(&params->list, &hdev->le_conn_params);
3675 	INIT_LIST_HEAD(&params->action);
3676 
3677 	params->conn_min_interval = hdev->le_conn_min_interval;
3678 	params->conn_max_interval = hdev->le_conn_max_interval;
3679 	params->conn_latency = hdev->le_conn_latency;
3680 	params->supervision_timeout = hdev->le_supv_timeout;
3681 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3682 
3683 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3684 
3685 	return params;
3686 }
3687 
3688 /* This function requires the caller holds hdev->lock */
3689 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3690 			u8 auto_connect)
3691 {
3692 	struct hci_conn_params *params;
3693 
3694 	params = hci_conn_params_add(hdev, addr, addr_type);
3695 	if (!params)
3696 		return -EIO;
3697 
3698 	if (params->auto_connect == auto_connect)
3699 		return 0;
3700 
3701 	list_del_init(&params->action);
3702 
3703 	switch (auto_connect) {
3704 	case HCI_AUTO_CONN_DISABLED:
3705 	case HCI_AUTO_CONN_LINK_LOSS:
3706 		hci_update_background_scan(hdev);
3707 		break;
3708 	case HCI_AUTO_CONN_REPORT:
3709 		list_add(&params->action, &hdev->pend_le_reports);
3710 		hci_update_background_scan(hdev);
3711 		break;
3712 	case HCI_AUTO_CONN_DIRECT:
3713 	case HCI_AUTO_CONN_ALWAYS:
3714 		if (!is_connected(hdev, addr, addr_type)) {
3715 			list_add(&params->action, &hdev->pend_le_conns);
3716 			hci_update_background_scan(hdev);
3717 		}
3718 		break;
3719 	}
3720 
3721 	params->auto_connect = auto_connect;
3722 
3723 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3724 	       auto_connect);
3725 
3726 	return 0;
3727 }
3728 
3729 static void hci_conn_params_free(struct hci_conn_params *params)
3730 {
3731 	if (params->conn) {
3732 		hci_conn_drop(params->conn);
3733 		hci_conn_put(params->conn);
3734 	}
3735 
3736 	list_del(&params->action);
3737 	list_del(&params->list);
3738 	kfree(params);
3739 }
3740 
3741 /* This function requires the caller holds hdev->lock */
3742 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3743 {
3744 	struct hci_conn_params *params;
3745 
3746 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3747 	if (!params)
3748 		return;
3749 
3750 	hci_conn_params_free(params);
3751 
3752 	hci_update_background_scan(hdev);
3753 
3754 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3755 }
3756 
3757 /* This function requires the caller holds hdev->lock */
3758 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3759 {
3760 	struct hci_conn_params *params, *tmp;
3761 
3762 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3763 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3764 			continue;
3765 		list_del(&params->list);
3766 		kfree(params);
3767 	}
3768 
3769 	BT_DBG("All LE disabled connection parameters were removed");
3770 }
3771 
3772 /* This function requires the caller holds hdev->lock */
3773 void hci_conn_params_clear_all(struct hci_dev *hdev)
3774 {
3775 	struct hci_conn_params *params, *tmp;
3776 
3777 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3778 		hci_conn_params_free(params);
3779 
3780 	hci_update_background_scan(hdev);
3781 
3782 	BT_DBG("All LE connection parameters were removed");
3783 }
3784 
3785 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3786 {
3787 	if (status) {
3788 		BT_ERR("Failed to start inquiry: status %d", status);
3789 
3790 		hci_dev_lock(hdev);
3791 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3792 		hci_dev_unlock(hdev);
3793 		return;
3794 	}
3795 }
3796 
3797 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3798 {
3799 	/* General inquiry access code (GIAC) */
3800 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3801 	struct hci_request req;
3802 	struct hci_cp_inquiry cp;
3803 	int err;
3804 
3805 	if (status) {
3806 		BT_ERR("Failed to disable LE scanning: status %d", status);
3807 		return;
3808 	}
3809 
3810 	switch (hdev->discovery.type) {
3811 	case DISCOV_TYPE_LE:
3812 		hci_dev_lock(hdev);
3813 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3814 		hci_dev_unlock(hdev);
3815 		break;
3816 
3817 	case DISCOV_TYPE_INTERLEAVED:
3818 		hci_req_init(&req, hdev);
3819 
3820 		memset(&cp, 0, sizeof(cp));
3821 		memcpy(&cp.lap, lap, sizeof(cp.lap));
3822 		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3823 		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3824 
3825 		hci_dev_lock(hdev);
3826 
3827 		hci_inquiry_cache_flush(hdev);
3828 
3829 		err = hci_req_run(&req, inquiry_complete);
3830 		if (err) {
3831 			BT_ERR("Inquiry request failed: err %d", err);
3832 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3833 		}
3834 
3835 		hci_dev_unlock(hdev);
3836 		break;
3837 	}
3838 }
3839 
3840 static void le_scan_disable_work(struct work_struct *work)
3841 {
3842 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3843 					    le_scan_disable.work);
3844 	struct hci_request req;
3845 	int err;
3846 
3847 	BT_DBG("%s", hdev->name);
3848 
3849 	hci_req_init(&req, hdev);
3850 
3851 	hci_req_add_le_scan_disable(&req);
3852 
3853 	err = hci_req_run(&req, le_scan_disable_work_complete);
3854 	if (err)
3855 		BT_ERR("Disable LE scanning request failed: err %d", err);
3856 }
3857 
3858 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3859 {
3860 	struct hci_dev *hdev = req->hdev;
3861 
3862 	/* If we're advertising or initiating an LE connection we can't
3863 	 * go ahead and change the random address at this time. This is
3864 	 * because the eventual initiator address used for the
3865 	 * subsequently created connection will be undefined (some
3866 	 * controllers use the new address and others the one we had
3867 	 * when the operation started).
3868 	 *
3869 	 * In this kind of scenario skip the update and let the random
3870 	 * address be updated at the next cycle.
3871 	 */
3872 	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3873 	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3874 		BT_DBG("Deferring random address update");
3875 		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3876 		return;
3877 	}
3878 
3879 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3880 }
3881 
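/* Pick the own address type for the request being built and, if needed,
 * queue the HCI command to set a new random address:
 *
 *   - with HCI_PRIVACY set, use a resolvable private address, regenerating
 *     it once HCI_RPA_EXPIRED has been flagged;
 *   - when privacy is required but HCI_PRIVACY is not set, fall back to a
 *     freshly generated unresolvable private address;
 *   - if the static address is forced or no public address exists, use the
 *     static random address;
 *   - otherwise use the public address.
 */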
3882 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3883 			      u8 *own_addr_type)
3884 {
3885 	struct hci_dev *hdev = req->hdev;
3886 	int err;
3887 
3888 	/* If privacy is enabled use a resolvable private address. If
3889 	 * the current RPA has expired or the controller is using an
3890 	 * address other than the current RPA, then generate a new one.
3891 	 */
3892 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3893 		int to;
3894 
3895 		*own_addr_type = ADDR_LE_DEV_RANDOM;
3896 
3897 		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3898 		    !bacmp(&hdev->random_addr, &hdev->rpa))
3899 			return 0;
3900 
3901 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3902 		if (err < 0) {
3903 			BT_ERR("%s failed to generate new RPA", hdev->name);
3904 			return err;
3905 		}
3906 
3907 		set_random_addr(req, &hdev->rpa);
3908 
3909 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3910 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3911 
3912 		return 0;
3913 	}
3914 
3915 	/* In case of required privacy without resolvable private address,
3916 	 * use an unresolvable private address. This is useful for active
3917 	 * scanning and non-connectable advertising.
3918 	 */
3919 	if (require_privacy) {
3920 		bdaddr_t urpa;
3921 
3922 		get_random_bytes(&urpa, 6);
3923 		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */
3924 
3925 		*own_addr_type = ADDR_LE_DEV_RANDOM;
3926 		set_random_addr(req, &urpa);
3927 		return 0;
3928 	}
3929 
3930 	/* If forcing a static address is in use or there is no public
3931 	 * address, use the static address as the random address (but skip
3932 	 * the HCI command if the current random address is already the
3933 	 * static one).
3934 	 */
3935 	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3936 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3937 		*own_addr_type = ADDR_LE_DEV_RANDOM;
3938 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
3939 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3940 				    &hdev->static_addr);
3941 		return 0;
3942 	}
3943 
3944 	/* Neither privacy nor static address is being used so use a
3945 	 * public address.
3946 	 */
3947 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
3948 
3949 	return 0;
3950 }
3951 
3952 /* Copy the Identity Address of the controller.
3953  *
3954  * If the controller has a public BD_ADDR, then by default use that one.
3955  * If this is an LE-only controller without a public address, default to
3956  * the static random address.
3957  *
3958  * For debugging purposes it is possible to force controllers with a
3959  * public address to use the static random address instead.
3960  */
3961 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3962 			       u8 *bdaddr_type)
3963 {
3964 	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3965 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3966 		bacpy(bdaddr, &hdev->static_addr);
3967 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3968 	} else {
3969 		bacpy(bdaddr, &hdev->bdaddr);
3970 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3971 	}
3972 }
3973 
3974 /* Alloc HCI device */
3975 struct hci_dev *hci_alloc_dev(void)
3976 {
3977 	struct hci_dev *hdev;
3978 
3979 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3980 	if (!hdev)
3981 		return NULL;
3982 
3983 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3984 	hdev->esco_type = (ESCO_HV1);
3985 	hdev->link_mode = (HCI_LM_ACCEPT);
3986 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3987 	hdev->io_capability = 0x03;	/* No Input No Output */
3988 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3989 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3990 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3991 
3992 	hdev->sniff_max_interval = 800;
3993 	hdev->sniff_min_interval = 80;
3994 
3995 	hdev->le_adv_channel_map = 0x07;
3996 	hdev->le_adv_min_interval = 0x0800;
3997 	hdev->le_adv_max_interval = 0x0800;
3998 	hdev->le_scan_interval = 0x0060;
3999 	hdev->le_scan_window = 0x0030;
4000 	hdev->le_conn_min_interval = 0x0028;
4001 	hdev->le_conn_max_interval = 0x0038;
4002 	hdev->le_conn_latency = 0x0000;
4003 	hdev->le_supv_timeout = 0x002a;
4004 
4005 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4006 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4007 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4008 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4009 
4010 	mutex_init(&hdev->lock);
4011 	mutex_init(&hdev->req_lock);
4012 
4013 	INIT_LIST_HEAD(&hdev->mgmt_pending);
4014 	INIT_LIST_HEAD(&hdev->blacklist);
4015 	INIT_LIST_HEAD(&hdev->whitelist);
4016 	INIT_LIST_HEAD(&hdev->uuids);
4017 	INIT_LIST_HEAD(&hdev->link_keys);
4018 	INIT_LIST_HEAD(&hdev->long_term_keys);
4019 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4020 	INIT_LIST_HEAD(&hdev->remote_oob_data);
4021 	INIT_LIST_HEAD(&hdev->le_white_list);
4022 	INIT_LIST_HEAD(&hdev->le_conn_params);
4023 	INIT_LIST_HEAD(&hdev->pend_le_conns);
4024 	INIT_LIST_HEAD(&hdev->pend_le_reports);
4025 	INIT_LIST_HEAD(&hdev->conn_hash.list);
4026 
4027 	INIT_WORK(&hdev->rx_work, hci_rx_work);
4028 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4029 	INIT_WORK(&hdev->tx_work, hci_tx_work);
4030 	INIT_WORK(&hdev->power_on, hci_power_on);
4031 
4032 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4033 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4034 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4035 
4036 	skb_queue_head_init(&hdev->rx_q);
4037 	skb_queue_head_init(&hdev->cmd_q);
4038 	skb_queue_head_init(&hdev->raw_q);
4039 
4040 	init_waitqueue_head(&hdev->req_wait_q);
4041 
4042 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4043 
4044 	hci_init_sysfs(hdev);
4045 	discovery_init(hdev);
4046 
4047 	return hdev;
4048 }
4049 EXPORT_SYMBOL(hci_alloc_dev);
4050 
4051 /* Free HCI device */
4052 void hci_free_dev(struct hci_dev *hdev)
4053 {
4054 	/* will free via device release */
4055 	put_device(&hdev->dev);
4056 }
4057 EXPORT_SYMBOL(hci_free_dev);
4058 
4059 /* Register HCI device */
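/* Minimal driver-side sketch (illustrative only, not part of this file).
 * A transport driver must set the open, close and send callbacks on the
 * hdev obtained from hci_alloc_dev() before registering it; the callback
 * names below are hypothetical:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */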
4060 int hci_register_dev(struct hci_dev *hdev)
4061 {
4062 	int id, error;
4063 
4064 	if (!hdev->open || !hdev->close || !hdev->send)
4065 		return -EINVAL;
4066 
4067 	/* Do not allow HCI_AMP devices to register at index 0,
4068 	 * so the index can be used as the AMP controller ID.
4069 	 */
4070 	switch (hdev->dev_type) {
4071 	case HCI_BREDR:
4072 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4073 		break;
4074 	case HCI_AMP:
4075 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4076 		break;
4077 	default:
4078 		return -EINVAL;
4079 	}
4080 
4081 	if (id < 0)
4082 		return id;
4083 
4084 	sprintf(hdev->name, "hci%d", id);
4085 	hdev->id = id;
4086 
4087 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4088 
4089 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4090 					  WQ_MEM_RECLAIM, 1, hdev->name);
4091 	if (!hdev->workqueue) {
4092 		error = -ENOMEM;
4093 		goto err;
4094 	}
4095 
4096 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4097 					      WQ_MEM_RECLAIM, 1, hdev->name);
4098 	if (!hdev->req_workqueue) {
4099 		destroy_workqueue(hdev->workqueue);
4100 		error = -ENOMEM;
4101 		goto err;
4102 	}
4103 
4104 	if (!IS_ERR_OR_NULL(bt_debugfs))
4105 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4106 
4107 	dev_set_name(&hdev->dev, "%s", hdev->name);
4108 
4109 	error = device_add(&hdev->dev);
4110 	if (error < 0)
4111 		goto err_wqueue;
4112 
4113 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4114 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4115 				    hdev);
4116 	if (hdev->rfkill) {
4117 		if (rfkill_register(hdev->rfkill) < 0) {
4118 			rfkill_destroy(hdev->rfkill);
4119 			hdev->rfkill = NULL;
4120 		}
4121 	}
4122 
4123 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4124 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
4125 
4126 	set_bit(HCI_SETUP, &hdev->dev_flags);
4127 	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4128 
4129 	if (hdev->dev_type == HCI_BREDR) {
4130 		/* Assume BR/EDR support until proven otherwise (such as
4131 		 * through reading supported features during init).
4132 		 */
4133 		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4134 	}
4135 
4136 	write_lock(&hci_dev_list_lock);
4137 	list_add(&hdev->list, &hci_dev_list);
4138 	write_unlock(&hci_dev_list_lock);
4139 
4140 	/* Devices that are marked for raw-only usage are unconfigured
4141 	 * and should not be included in normal operation.
4142 	 */
4143 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4144 		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4145 
4146 	hci_notify(hdev, HCI_DEV_REG);
4147 	hci_dev_hold(hdev);
4148 
4149 	queue_work(hdev->req_workqueue, &hdev->power_on);
4150 
4151 	return id;
4152 
4153 err_wqueue:
4154 	destroy_workqueue(hdev->workqueue);
4155 	destroy_workqueue(hdev->req_workqueue);
4156 err:
4157 	ida_simple_remove(&hci_index_ida, hdev->id);
4158 
4159 	return error;
4160 }
4161 EXPORT_SYMBOL(hci_register_dev);
4162 
4163 /* Unregister HCI device */
4164 void hci_unregister_dev(struct hci_dev *hdev)
4165 {
4166 	int i, id;
4167 
4168 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4169 
4170 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4171 
4172 	id = hdev->id;
4173 
4174 	write_lock(&hci_dev_list_lock);
4175 	list_del(&hdev->list);
4176 	write_unlock(&hci_dev_list_lock);
4177 
4178 	hci_dev_do_close(hdev);
4179 
4180 	for (i = 0; i < NUM_REASSEMBLY; i++)
4181 		kfree_skb(hdev->reassembly[i]);
4182 
4183 	cancel_work_sync(&hdev->power_on);
4184 
4185 	if (!test_bit(HCI_INIT, &hdev->flags) &&
4186 	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4187 	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4188 		hci_dev_lock(hdev);
4189 		mgmt_index_removed(hdev);
4190 		hci_dev_unlock(hdev);
4191 	}
4192 
4193 	/* mgmt_index_removed should take care of emptying the
4194 	 * pending list */
4195 	BUG_ON(!list_empty(&hdev->mgmt_pending));
4196 
4197 	hci_notify(hdev, HCI_DEV_UNREG);
4198 
4199 	if (hdev->rfkill) {
4200 		rfkill_unregister(hdev->rfkill);
4201 		rfkill_destroy(hdev->rfkill);
4202 	}
4203 
4204 	smp_unregister(hdev);
4205 
4206 	device_del(&hdev->dev);
4207 
4208 	debugfs_remove_recursive(hdev->debugfs);
4209 
4210 	destroy_workqueue(hdev->workqueue);
4211 	destroy_workqueue(hdev->req_workqueue);
4212 
4213 	hci_dev_lock(hdev);
4214 	hci_bdaddr_list_clear(&hdev->blacklist);
4215 	hci_bdaddr_list_clear(&hdev->whitelist);
4216 	hci_uuids_clear(hdev);
4217 	hci_link_keys_clear(hdev);
4218 	hci_smp_ltks_clear(hdev);
4219 	hci_smp_irks_clear(hdev);
4220 	hci_remote_oob_data_clear(hdev);
4221 	hci_bdaddr_list_clear(&hdev->le_white_list);
4222 	hci_conn_params_clear_all(hdev);
4223 	hci_dev_unlock(hdev);
4224 
4225 	hci_dev_put(hdev);
4226 
4227 	ida_simple_remove(&hci_index_ida, id);
4228 }
4229 EXPORT_SYMBOL(hci_unregister_dev);
4230 
4231 /* Suspend HCI device */
4232 int hci_suspend_dev(struct hci_dev *hdev)
4233 {
4234 	hci_notify(hdev, HCI_DEV_SUSPEND);
4235 	return 0;
4236 }
4237 EXPORT_SYMBOL(hci_suspend_dev);
4238 
4239 /* Resume HCI device */
4240 int hci_resume_dev(struct hci_dev *hdev)
4241 {
4242 	hci_notify(hdev, HCI_DEV_RESUME);
4243 	return 0;
4244 }
4245 EXPORT_SYMBOL(hci_resume_dev);
4246 
4247 /* Receive frame from HCI drivers */
4248 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4249 {
4250 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4251 		      && !test_bit(HCI_INIT, &hdev->flags))) {
4252 		kfree_skb(skb);
4253 		return -ENXIO;
4254 	}
4255 
4256 	/* Incoming skb */
4257 	bt_cb(skb)->incoming = 1;
4258 
4259 	/* Time stamp */
4260 	__net_timestamp(skb);
4261 
4262 	skb_queue_tail(&hdev->rx_q, skb);
4263 	queue_work(hdev->workqueue, &hdev->rx_work);
4264 
4265 	return 0;
4266 }
4267 EXPORT_SYMBOL(hci_recv_frame);
4268 
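/* Incrementally rebuild a full HCI packet from a byte stream. A partial skb
 * is kept in hdev->reassembly[index]; it is allocated on the first byte of a
 * new packet, grown until the header and then the payload length indicated
 * by that header have been copied in, and finally handed to hci_recv_frame().
 * Returns the number of input bytes left unconsumed, or a negative error.
 */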
4269 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4270 			  int count, __u8 index)
4271 {
4272 	int len = 0;
4273 	int hlen = 0;
4274 	int remain = count;
4275 	struct sk_buff *skb;
4276 	struct bt_skb_cb *scb;
4277 
4278 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4279 	    index >= NUM_REASSEMBLY)
4280 		return -EILSEQ;
4281 
4282 	skb = hdev->reassembly[index];
4283 
4284 	if (!skb) {
4285 		switch (type) {
4286 		case HCI_ACLDATA_PKT:
4287 			len = HCI_MAX_FRAME_SIZE;
4288 			hlen = HCI_ACL_HDR_SIZE;
4289 			break;
4290 		case HCI_EVENT_PKT:
4291 			len = HCI_MAX_EVENT_SIZE;
4292 			hlen = HCI_EVENT_HDR_SIZE;
4293 			break;
4294 		case HCI_SCODATA_PKT:
4295 			len = HCI_MAX_SCO_SIZE;
4296 			hlen = HCI_SCO_HDR_SIZE;
4297 			break;
4298 		}
4299 
4300 		skb = bt_skb_alloc(len, GFP_ATOMIC);
4301 		if (!skb)
4302 			return -ENOMEM;
4303 
4304 		scb = (void *) skb->cb;
4305 		scb->expect = hlen;
4306 		scb->pkt_type = type;
4307 
4308 		hdev->reassembly[index] = skb;
4309 	}
4310 
4311 	while (count) {
4312 		scb = (void *) skb->cb;
4313 		len = min_t(uint, scb->expect, count);
4314 
4315 		memcpy(skb_put(skb, len), data, len);
4316 
4317 		count -= len;
4318 		data += len;
4319 		scb->expect -= len;
4320 		remain = count;
4321 
4322 		switch (type) {
4323 		case HCI_EVENT_PKT:
4324 			if (skb->len == HCI_EVENT_HDR_SIZE) {
4325 				struct hci_event_hdr *h = hci_event_hdr(skb);
4326 				scb->expect = h->plen;
4327 
4328 				if (skb_tailroom(skb) < scb->expect) {
4329 					kfree_skb(skb);
4330 					hdev->reassembly[index] = NULL;
4331 					return -ENOMEM;
4332 				}
4333 			}
4334 			break;
4335 
4336 		case HCI_ACLDATA_PKT:
4337 			if (skb->len  == HCI_ACL_HDR_SIZE) {
4338 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
4339 				scb->expect = __le16_to_cpu(h->dlen);
4340 
4341 				if (skb_tailroom(skb) < scb->expect) {
4342 					kfree_skb(skb);
4343 					hdev->reassembly[index] = NULL;
4344 					return -ENOMEM;
4345 				}
4346 			}
4347 			break;
4348 
4349 		case HCI_SCODATA_PKT:
4350 			if (skb->len == HCI_SCO_HDR_SIZE) {
4351 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
4352 				scb->expect = h->dlen;
4353 
4354 				if (skb_tailroom(skb) < scb->expect) {
4355 					kfree_skb(skb);
4356 					hdev->reassembly[index] = NULL;
4357 					return -ENOMEM;
4358 				}
4359 			}
4360 			break;
4361 		}
4362 
4363 		if (scb->expect == 0) {
4364 			/* Complete frame */
4365 
4366 			bt_cb(skb)->pkt_type = type;
4367 			hci_recv_frame(hdev, skb);
4368 
4369 			hdev->reassembly[index] = NULL;
4370 			return remain;
4371 		}
4372 	}
4373 
4374 	return remain;
4375 }
4376 
4377 #define STREAM_REASSEMBLY 0
4378 
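/* Feed a raw byte stream (e.g. from a UART-style transport) into the
 * reassembler. The first byte of every packet is the HCI packet type
 * indicator; the remaining bytes are handed to hci_reassembly() using the
 * single STREAM_REASSEMBLY slot.
 */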
4379 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4380 {
4381 	int type;
4382 	int rem = 0;
4383 
4384 	while (count) {
4385 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4386 
4387 		if (!skb) {
4388 			struct { char type; } *pkt;
4389 
4390 			/* Start of the frame */
4391 			pkt = data;
4392 			type = pkt->type;
4393 
4394 			data++;
4395 			count--;
4396 		} else
4397 			type = bt_cb(skb)->pkt_type;
4398 
4399 		rem = hci_reassembly(hdev, type, data, count,
4400 				     STREAM_REASSEMBLY);
4401 		if (rem < 0)
4402 			return rem;
4403 
4404 		data += (count - rem);
4405 		count = rem;
4406 	}
4407 
4408 	return rem;
4409 }
4410 EXPORT_SYMBOL(hci_recv_stream_fragment);
4411 
4412 /* ---- Interface to upper protocols ---- */
4413 
4414 int hci_register_cb(struct hci_cb *cb)
4415 {
4416 	BT_DBG("%p name %s", cb, cb->name);
4417 
4418 	write_lock(&hci_cb_list_lock);
4419 	list_add(&cb->list, &hci_cb_list);
4420 	write_unlock(&hci_cb_list_lock);
4421 
4422 	return 0;
4423 }
4424 EXPORT_SYMBOL(hci_register_cb);
4425 
4426 int hci_unregister_cb(struct hci_cb *cb)
4427 {
4428 	BT_DBG("%p name %s", cb, cb->name);
4429 
4430 	write_lock(&hci_cb_list_lock);
4431 	list_del(&cb->list);
4432 	write_unlock(&hci_cb_list_lock);
4433 
4434 	return 0;
4435 }
4436 EXPORT_SYMBOL(hci_unregister_cb);
4437 
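/* Hand one packet to the driver: timestamp it, mirror it to the monitor
 * socket (and to raw sockets when in promiscuous mode), detach it from its
 * owning socket and call the driver's send callback, freeing the skb if the
 * driver reports an error.
 */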
4438 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4439 {
4440 	int err;
4441 
4442 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4443 
4444 	/* Time stamp */
4445 	__net_timestamp(skb);
4446 
4447 	/* Send copy to monitor */
4448 	hci_send_to_monitor(hdev, skb);
4449 
4450 	if (atomic_read(&hdev->promisc)) {
4451 		/* Send copy to the sockets */
4452 		hci_send_to_sock(hdev, skb);
4453 	}
4454 
4455 	/* Get rid of skb owner, prior to sending to the driver. */
4456 	skb_orphan(skb);
4457 
4458 	err = hdev->send(hdev, skb);
4459 	if (err < 0) {
4460 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4461 		kfree_skb(skb);
4462 	}
4463 }
4464 
4465 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4466 {
4467 	skb_queue_head_init(&req->cmd_q);
4468 	req->hdev = hdev;
4469 	req->err = 0;
4470 }
4471 
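/* Submit a built request: the completion callback is attached to the last
 * queued command, the whole batch is spliced onto hdev->cmd_q and cmd_work
 * is scheduled. Typical usage (see le_scan_disable_work() above for a real
 * caller; my_complete below is a hypothetical callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *	if (err)
 *		BT_ERR("request failed: err %d", err);
 */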
4472 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4473 {
4474 	struct hci_dev *hdev = req->hdev;
4475 	struct sk_buff *skb;
4476 	unsigned long flags;
4477 
4478 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4479 
4480 	/* If an error occurred during request building, remove all HCI
4481 	 * commands queued on the HCI request queue.
4482 	 */
4483 	if (req->err) {
4484 		skb_queue_purge(&req->cmd_q);
4485 		return req->err;
4486 	}
4487 
4488 	/* Do not allow empty requests */
4489 	if (skb_queue_empty(&req->cmd_q))
4490 		return -ENODATA;
4491 
4492 	skb = skb_peek_tail(&req->cmd_q);
4493 	bt_cb(skb)->req.complete = complete;
4494 
4495 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4496 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4497 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4498 
4499 	queue_work(hdev->workqueue, &hdev->cmd_work);
4500 
4501 	return 0;
4502 }
4503 
4504 bool hci_req_pending(struct hci_dev *hdev)
4505 {
4506 	return (hdev->req_status == HCI_REQ_PEND);
4507 }
4508 
4509 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4510 				       u32 plen, const void *param)
4511 {
4512 	int len = HCI_COMMAND_HDR_SIZE + plen;
4513 	struct hci_command_hdr *hdr;
4514 	struct sk_buff *skb;
4515 
4516 	skb = bt_skb_alloc(len, GFP_ATOMIC);
4517 	if (!skb)
4518 		return NULL;
4519 
4520 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4521 	hdr->opcode = cpu_to_le16(opcode);
4522 	hdr->plen   = plen;
4523 
4524 	if (plen)
4525 		memcpy(skb_put(skb, plen), param, plen);
4526 
4527 	BT_DBG("skb len %d", skb->len);
4528 
4529 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4530 	bt_cb(skb)->opcode = opcode;
4531 
4532 	return skb;
4533 }
4534 
4535 /* Send HCI command */
4536 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4537 		 const void *param)
4538 {
4539 	struct sk_buff *skb;
4540 
4541 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4542 
4543 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
4544 	if (!skb) {
4545 		BT_ERR("%s no memory for command", hdev->name);
4546 		return -ENOMEM;
4547 	}
4548 
4549 	/* Stand-alone HCI commands must be flagged as
4550 	 * single-command requests.
4551 	 */
4552 	bt_cb(skb)->req.start = true;
4553 
4554 	skb_queue_tail(&hdev->cmd_q, skb);
4555 	queue_work(hdev->workqueue, &hdev->cmd_work);
4556 
4557 	return 0;
4558 }
4559 
4560 /* Queue a command to an asynchronous HCI request */
4561 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4562 		    const void *param, u8 event)
4563 {
4564 	struct hci_dev *hdev = req->hdev;
4565 	struct sk_buff *skb;
4566 
4567 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4568 
4569 	/* If an error occurred during request building, there is no point in
4570 	 * queueing the HCI command. We can simply return.
4571 	 */
4572 	if (req->err)
4573 		return;
4574 
4575 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
4576 	if (!skb) {
4577 		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4578 		       hdev->name, opcode);
4579 		req->err = -ENOMEM;
4580 		return;
4581 	}
4582 
4583 	if (skb_queue_empty(&req->cmd_q))
4584 		bt_cb(skb)->req.start = true;
4585 
4586 	bt_cb(skb)->req.event = event;
4587 
4588 	skb_queue_tail(&req->cmd_q, skb);
4589 }
4590 
4591 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4592 		 const void *param)
4593 {
4594 	hci_req_add_ev(req, opcode, plen, param, 0);
4595 }
4596 
4597 /* Get data from the previously sent command */
4598 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4599 {
4600 	struct hci_command_hdr *hdr;
4601 
4602 	if (!hdev->sent_cmd)
4603 		return NULL;
4604 
4605 	hdr = (void *) hdev->sent_cmd->data;
4606 
4607 	if (hdr->opcode != cpu_to_le16(opcode))
4608 		return NULL;
4609 
4610 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4611 
4612 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4613 }
4614 
4615 /* Send ACL data */
4616 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4617 {
4618 	struct hci_acl_hdr *hdr;
4619 	int len = skb->len;
4620 
4621 	skb_push(skb, HCI_ACL_HDR_SIZE);
4622 	skb_reset_transport_header(skb);
4623 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4624 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4625 	hdr->dlen   = cpu_to_le16(len);
4626 }
4627 
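/* Build and queue the ACL fragments for one upper-layer packet. The head skb
 * keeps the boundary flags passed by the caller; any skbs chained on its
 * frag_list are re-tagged as ACL_CONT, given their own ACL headers and
 * queued atomically so the controller never sees interleaved fragments.
 */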
4628 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4629 			  struct sk_buff *skb, __u16 flags)
4630 {
4631 	struct hci_conn *conn = chan->conn;
4632 	struct hci_dev *hdev = conn->hdev;
4633 	struct sk_buff *list;
4634 
4635 	skb->len = skb_headlen(skb);
4636 	skb->data_len = 0;
4637 
4638 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4639 
4640 	switch (hdev->dev_type) {
4641 	case HCI_BREDR:
4642 		hci_add_acl_hdr(skb, conn->handle, flags);
4643 		break;
4644 	case HCI_AMP:
4645 		hci_add_acl_hdr(skb, chan->handle, flags);
4646 		break;
4647 	default:
4648 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4649 		return;
4650 	}
4651 
4652 	list = skb_shinfo(skb)->frag_list;
4653 	if (!list) {
4654 		/* Non fragmented */
4655 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4656 
4657 		skb_queue_tail(queue, skb);
4658 	} else {
4659 		/* Fragmented */
4660 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4661 
4662 		skb_shinfo(skb)->frag_list = NULL;
4663 
4664 		/* Queue all fragments atomically */
4665 		spin_lock(&queue->lock);
4666 
4667 		__skb_queue_tail(queue, skb);
4668 
4669 		flags &= ~ACL_START;
4670 		flags |= ACL_CONT;
4671 		do {
4672 			skb = list; list = list->next;
4673 
4674 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4675 			hci_add_acl_hdr(skb, conn->handle, flags);
4676 
4677 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4678 
4679 			__skb_queue_tail(queue, skb);
4680 		} while (list);
4681 
4682 		spin_unlock(&queue->lock);
4683 	}
4684 }
4685 
4686 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4687 {
4688 	struct hci_dev *hdev = chan->conn->hdev;
4689 
4690 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4691 
4692 	hci_queue_acl(chan, &chan->data_q, skb, flags);
4693 
4694 	queue_work(hdev->workqueue, &hdev->tx_work);
4695 }
4696 
4697 /* Send SCO data */
4698 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4699 {
4700 	struct hci_dev *hdev = conn->hdev;
4701 	struct hci_sco_hdr hdr;
4702 
4703 	BT_DBG("%s len %d", hdev->name, skb->len);
4704 
4705 	hdr.handle = cpu_to_le16(conn->handle);
4706 	hdr.dlen   = skb->len;
4707 
4708 	skb_push(skb, HCI_SCO_HDR_SIZE);
4709 	skb_reset_transport_header(skb);
4710 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4711 
4712 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4713 
4714 	skb_queue_tail(&conn->data_q, skb);
4715 	queue_work(hdev->workqueue, &hdev->tx_work);
4716 }
4717 
4718 /* ---- HCI TX task (outgoing data) ---- */
4719 
4720 /* HCI Connection scheduler */
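/* Pick the connection of the given link type that has data queued and the
 * fewest packets already in flight, and compute its fair share of the
 * controller's free buffers (at least one packet) as *quote.
 */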
4721 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4722 				     int *quote)
4723 {
4724 	struct hci_conn_hash *h = &hdev->conn_hash;
4725 	struct hci_conn *conn = NULL, *c;
4726 	unsigned int num = 0, min = ~0;
4727 
4728 	/* We don't have to lock device here. Connections are always
4729 	 * added and removed with TX task disabled. */
4730 
4731 	rcu_read_lock();
4732 
4733 	list_for_each_entry_rcu(c, &h->list, list) {
4734 		if (c->type != type || skb_queue_empty(&c->data_q))
4735 			continue;
4736 
4737 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4738 			continue;
4739 
4740 		num++;
4741 
4742 		if (c->sent < min) {
4743 			min  = c->sent;
4744 			conn = c;
4745 		}
4746 
4747 		if (hci_conn_num(hdev, type) == num)
4748 			break;
4749 	}
4750 
4751 	rcu_read_unlock();
4752 
4753 	if (conn) {
4754 		int cnt, q;
4755 
4756 		switch (conn->type) {
4757 		case ACL_LINK:
4758 			cnt = hdev->acl_cnt;
4759 			break;
4760 		case SCO_LINK:
4761 		case ESCO_LINK:
4762 			cnt = hdev->sco_cnt;
4763 			break;
4764 		case LE_LINK:
4765 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4766 			break;
4767 		default:
4768 			cnt = 0;
4769 			BT_ERR("Unknown link type");
4770 		}
4771 
4772 		q = cnt / num;
4773 		*quote = q ? q : 1;
4774 	} else
4775 		*quote = 0;
4776 
4777 	BT_DBG("conn %p quote %d", conn, *quote);
4778 	return conn;
4779 }
4780 
4781 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4782 {
4783 	struct hci_conn_hash *h = &hdev->conn_hash;
4784 	struct hci_conn *c;
4785 
4786 	BT_ERR("%s link tx timeout", hdev->name);
4787 
4788 	rcu_read_lock();
4789 
4790 	/* Kill stalled connections */
4791 	list_for_each_entry_rcu(c, &h->list, list) {
4792 		if (c->type == type && c->sent) {
4793 			BT_ERR("%s killing stalled connection %pMR",
4794 			       hdev->name, &c->dst);
4795 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4796 		}
4797 	}
4798 
4799 	rcu_read_unlock();
4800 }
4801 
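/* Channel-level variant of the scheduler above: among all channels of the
 * given link type with queued data, prefer the highest skb->priority and
 * break ties by the lowest number of packets outstanding on the owning
 * connection; *quote again receives the channel's share of the free buffers.
 */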
4802 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4803 				      int *quote)
4804 {
4805 	struct hci_conn_hash *h = &hdev->conn_hash;
4806 	struct hci_chan *chan = NULL;
4807 	unsigned int num = 0, min = ~0, cur_prio = 0;
4808 	struct hci_conn *conn;
4809 	int cnt, q, conn_num = 0;
4810 
4811 	BT_DBG("%s", hdev->name);
4812 
4813 	rcu_read_lock();
4814 
4815 	list_for_each_entry_rcu(conn, &h->list, list) {
4816 		struct hci_chan *tmp;
4817 
4818 		if (conn->type != type)
4819 			continue;
4820 
4821 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4822 			continue;
4823 
4824 		conn_num++;
4825 
4826 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4827 			struct sk_buff *skb;
4828 
4829 			if (skb_queue_empty(&tmp->data_q))
4830 				continue;
4831 
4832 			skb = skb_peek(&tmp->data_q);
4833 			if (skb->priority < cur_prio)
4834 				continue;
4835 
4836 			if (skb->priority > cur_prio) {
4837 				num = 0;
4838 				min = ~0;
4839 				cur_prio = skb->priority;
4840 			}
4841 
4842 			num++;
4843 
4844 			if (conn->sent < min) {
4845 				min  = conn->sent;
4846 				chan = tmp;
4847 			}
4848 		}
4849 
4850 		if (hci_conn_num(hdev, type) == conn_num)
4851 			break;
4852 	}
4853 
4854 	rcu_read_unlock();
4855 
4856 	if (!chan)
4857 		return NULL;
4858 
4859 	switch (chan->conn->type) {
4860 	case ACL_LINK:
4861 		cnt = hdev->acl_cnt;
4862 		break;
4863 	case AMP_LINK:
4864 		cnt = hdev->block_cnt;
4865 		break;
4866 	case SCO_LINK:
4867 	case ESCO_LINK:
4868 		cnt = hdev->sco_cnt;
4869 		break;
4870 	case LE_LINK:
4871 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4872 		break;
4873 	default:
4874 		cnt = 0;
4875 		BT_ERR("Unknown link type");
4876 	}
4877 
4878 	q = cnt / num;
4879 	*quote = q ? q : 1;
4880 	BT_DBG("chan %p quote %d", chan, *quote);
4881 	return chan;
4882 }
4883 
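/* Anti-starvation pass run after a scheduling round: channels that did send
 * something get their counter reset, while channels that were skipped have
 * the priority of their head skb promoted to HCI_PRIO_MAX - 1 so they win a
 * later round.
 */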
4884 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4885 {
4886 	struct hci_conn_hash *h = &hdev->conn_hash;
4887 	struct hci_conn *conn;
4888 	int num = 0;
4889 
4890 	BT_DBG("%s", hdev->name);
4891 
4892 	rcu_read_lock();
4893 
4894 	list_for_each_entry_rcu(conn, &h->list, list) {
4895 		struct hci_chan *chan;
4896 
4897 		if (conn->type != type)
4898 			continue;
4899 
4900 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4901 			continue;
4902 
4903 		num++;
4904 
4905 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4906 			struct sk_buff *skb;
4907 
4908 			if (chan->sent) {
4909 				chan->sent = 0;
4910 				continue;
4911 			}
4912 
4913 			if (skb_queue_empty(&chan->data_q))
4914 				continue;
4915 
4916 			skb = skb_peek(&chan->data_q);
4917 			if (skb->priority >= HCI_PRIO_MAX - 1)
4918 				continue;
4919 
4920 			skb->priority = HCI_PRIO_MAX - 1;
4921 
4922 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4923 			       skb->priority);
4924 		}
4925 
4926 		if (hci_conn_num(hdev, type) == num)
4927 			break;
4928 	}
4929 
4930 	rcu_read_unlock();
4931 
4932 }
4933 
4934 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4935 {
4936 	/* Calculate count of blocks used by this packet */
4937 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4938 }
4939 
4940 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4941 {
4942 	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4943 		/* ACL tx timeout must be longer than maximum
4944 		 * link supervision timeout (40.9 seconds) */
4945 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4946 				       HCI_ACL_TX_TIMEOUT))
4947 			hci_link_tx_to(hdev, ACL_LINK);
4948 	}
4949 }
4950 
4951 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4952 {
4953 	unsigned int cnt = hdev->acl_cnt;
4954 	struct hci_chan *chan;
4955 	struct sk_buff *skb;
4956 	int quote;
4957 
4958 	__check_timeout(hdev, cnt);
4959 
4960 	while (hdev->acl_cnt &&
4961 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4962 		u32 priority = (skb_peek(&chan->data_q))->priority;
4963 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4964 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4965 			       skb->len, skb->priority);
4966 
4967 			/* Stop if priority has changed */
4968 			if (skb->priority < priority)
4969 				break;
4970 
4971 			skb = skb_dequeue(&chan->data_q);
4972 
4973 			hci_conn_enter_active_mode(chan->conn,
4974 						   bt_cb(skb)->force_active);
4975 
4976 			hci_send_frame(hdev, skb);
4977 			hdev->acl_last_tx = jiffies;
4978 
4979 			hdev->acl_cnt--;
4980 			chan->sent++;
4981 			chan->conn->sent++;
4982 		}
4983 	}
4984 
4985 	if (cnt != hdev->acl_cnt)
4986 		hci_prio_recalculate(hdev, ACL_LINK);
4987 }
4988 
4989 static void hci_sched_acl_blk(struct hci_dev *hdev)
4990 {
4991 	unsigned int cnt = hdev->block_cnt;
4992 	struct hci_chan *chan;
4993 	struct sk_buff *skb;
4994 	int quote;
4995 	u8 type;
4996 
4997 	__check_timeout(hdev, cnt);
4998 
4999 	BT_DBG("%s", hdev->name);
5000 
5001 	if (hdev->dev_type == HCI_AMP)
5002 		type = AMP_LINK;
5003 	else
5004 		type = ACL_LINK;
5005 
5006 	while (hdev->block_cnt > 0 &&
5007 	       (chan = hci_chan_sent(hdev, type, &quote))) {
5008 		u32 priority = (skb_peek(&chan->data_q))->priority;
5009 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5010 			int blocks;
5011 
5012 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5013 			       skb->len, skb->priority);
5014 
5015 			/* Stop if priority has changed */
5016 			if (skb->priority < priority)
5017 				break;
5018 
5019 			skb = skb_dequeue(&chan->data_q);
5020 
5021 			blocks = __get_blocks(hdev, skb);
5022 			if (blocks > hdev->block_cnt)
5023 				return;
5024 
5025 			hci_conn_enter_active_mode(chan->conn,
5026 						   bt_cb(skb)->force_active);
5027 
5028 			hci_send_frame(hdev, skb);
5029 			hdev->acl_last_tx = jiffies;
5030 
5031 			hdev->block_cnt -= blocks;
5032 			quote -= blocks;
5033 
5034 			chan->sent += blocks;
5035 			chan->conn->sent += blocks;
5036 		}
5037 	}
5038 
5039 	if (cnt != hdev->block_cnt)
5040 		hci_prio_recalculate(hdev, type);
5041 }
5042 
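/* Dispatch ACL scheduling according to the controller's flow control mode:
 * packet-based controllers are driven by hdev->acl_cnt, block-based
 * controllers (typically AMP) by hdev->block_cnt.
 */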
5043 static void hci_sched_acl(struct hci_dev *hdev)
5044 {
5045 	BT_DBG("%s", hdev->name);
5046 
5047 	/* No ACL link over BR/EDR controller */
5048 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5049 		return;
5050 
5051 	/* No AMP link over AMP controller */
5052 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5053 		return;
5054 
5055 	switch (hdev->flow_ctl_mode) {
5056 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
5057 		hci_sched_acl_pkt(hdev);
5058 		break;
5059 
5060 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5061 		hci_sched_acl_blk(hdev);
5062 		break;
5063 	}
5064 }
5065 
5066 /* Schedule SCO */
5067 static void hci_sched_sco(struct hci_dev *hdev)
5068 {
5069 	struct hci_conn *conn;
5070 	struct sk_buff *skb;
5071 	int quote;
5072 
5073 	BT_DBG("%s", hdev->name);
5074 
5075 	if (!hci_conn_num(hdev, SCO_LINK))
5076 		return;
5077 
5078 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5079 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5080 			BT_DBG("skb %p len %d", skb, skb->len);
5081 			hci_send_frame(hdev, skb);
5082 
5083 			conn->sent++;
5084 			if (conn->sent == ~0)
5085 				conn->sent = 0;
5086 		}
5087 	}
5088 }
5089 
5090 static void hci_sched_esco(struct hci_dev *hdev)
5091 {
5092 	struct hci_conn *conn;
5093 	struct sk_buff *skb;
5094 	int quote;
5095 
5096 	BT_DBG("%s", hdev->name);
5097 
5098 	if (!hci_conn_num(hdev, ESCO_LINK))
5099 		return;
5100 
5101 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5102 						     &quote))) {
5103 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5104 			BT_DBG("skb %p len %d", skb, skb->len);
5105 			hci_send_frame(hdev, skb);
5106 
5107 			conn->sent++;
5108 			if (conn->sent == ~0)
5109 				conn->sent = 0;
5110 		}
5111 	}
5112 }
5113 
5114 static void hci_sched_le(struct hci_dev *hdev)
5115 {
5116 	struct hci_chan *chan;
5117 	struct sk_buff *skb;
5118 	int quote, cnt, tmp;
5119 
5120 	BT_DBG("%s", hdev->name);
5121 
5122 	if (!hci_conn_num(hdev, LE_LINK))
5123 		return;
5124 
5125 	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5126 		/* LE tx timeout must be longer than maximum
5127 		 * link supervision timeout (40.9 seconds) */
5128 		if (!hdev->le_cnt && hdev->le_pkts &&
5129 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
5130 			hci_link_tx_to(hdev, LE_LINK);
5131 	}
5132 
5133 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5134 	tmp = cnt;
5135 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5136 		u32 priority = (skb_peek(&chan->data_q))->priority;
5137 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
5138 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5139 			       skb->len, skb->priority);
5140 
5141 			/* Stop if priority has changed */
5142 			if (skb->priority < priority)
5143 				break;
5144 
5145 			skb = skb_dequeue(&chan->data_q);
5146 
5147 			hci_send_frame(hdev, skb);
5148 			hdev->le_last_tx = jiffies;
5149 
5150 			cnt--;
5151 			chan->sent++;
5152 			chan->conn->sent++;
5153 		}
5154 	}
5155 
5156 	if (hdev->le_pkts)
5157 		hdev->le_cnt = cnt;
5158 	else
5159 		hdev->acl_cnt = cnt;
5160 
5161 	if (cnt != tmp)
5162 		hci_prio_recalculate(hdev, LE_LINK);
5163 }
5164 
5165 static void hci_tx_work(struct work_struct *work)
5166 {
5167 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5168 	struct sk_buff *skb;
5169 
5170 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5171 	       hdev->sco_cnt, hdev->le_cnt);
5172 
5173 	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5174 		/* Schedule queues and send stuff to HCI driver */
5175 		hci_sched_acl(hdev);
5176 		hci_sched_sco(hdev);
5177 		hci_sched_esco(hdev);
5178 		hci_sched_le(hdev);
5179 	}
5180 
5181 	/* Send next queued raw (unknown type) packet */
5182 	while ((skb = skb_dequeue(&hdev->raw_q)))
5183 		hci_send_frame(hdev, skb);
5184 }
5185 
5186 /* ----- HCI RX task (incoming data processing) ----- */
5187 
5188 /* ACL data packet */
5189 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5190 {
5191 	struct hci_acl_hdr *hdr = (void *) skb->data;
5192 	struct hci_conn *conn;
5193 	__u16 handle, flags;
5194 
5195 	skb_pull(skb, HCI_ACL_HDR_SIZE);
5196 
5197 	handle = __le16_to_cpu(hdr->handle);
5198 	flags  = hci_flags(handle);
5199 	handle = hci_handle(handle);
5200 
5201 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5202 	       handle, flags);
5203 
5204 	hdev->stat.acl_rx++;
5205 
5206 	hci_dev_lock(hdev);
5207 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5208 	hci_dev_unlock(hdev);
5209 
5210 	if (conn) {
5211 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5212 
5213 		/* Send to upper protocol */
5214 		l2cap_recv_acldata(conn, skb, flags);
5215 		return;
5216 	} else {
5217 		BT_ERR("%s ACL packet for unknown connection handle %d",
5218 		       hdev->name, handle);
5219 	}
5220 
5221 	kfree_skb(skb);
5222 }
5223 
5224 /* SCO data packet */
5225 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5226 {
5227 	struct hci_sco_hdr *hdr = (void *) skb->data;
5228 	struct hci_conn *conn;
5229 	__u16 handle;
5230 
5231 	skb_pull(skb, HCI_SCO_HDR_SIZE);
5232 
5233 	handle = __le16_to_cpu(hdr->handle);
5234 
5235 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5236 
5237 	hdev->stat.sco_rx++;
5238 
5239 	hci_dev_lock(hdev);
5240 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5241 	hci_dev_unlock(hdev);
5242 
5243 	if (conn) {
5244 		/* Send to upper protocol */
5245 		sco_recv_scodata(conn, skb);
5246 		return;
5247 	} else {
5248 		BT_ERR("%s SCO packet for unknown connection handle %d",
5249 		       hdev->name, handle);
5250 	}
5251 
5252 	kfree_skb(skb);
5253 }
5254 
5255 static bool hci_req_is_complete(struct hci_dev *hdev)
5256 {
5257 	struct sk_buff *skb;
5258 
5259 	skb = skb_peek(&hdev->cmd_q);
5260 	if (!skb)
5261 		return true;
5262 
5263 	return bt_cb(skb)->req.start;
5264 }
5265 
5266 static void hci_resend_last(struct hci_dev *hdev)
5267 {
5268 	struct hci_command_hdr *sent;
5269 	struct sk_buff *skb;
5270 	u16 opcode;
5271 
5272 	if (!hdev->sent_cmd)
5273 		return;
5274 
5275 	sent = (void *) hdev->sent_cmd->data;
5276 	opcode = __le16_to_cpu(sent->opcode);
5277 	if (opcode == HCI_OP_RESET)
5278 		return;
5279 
5280 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5281 	if (!skb)
5282 		return;
5283 
5284 	skb_queue_head(&hdev->cmd_q, skb);
5285 	queue_work(hdev->workqueue, &hdev->cmd_work);
5286 }
5287 
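/* Match a command completion against the request machinery: a stray reset
 * complete from some CSR controllers triggers a resend of the last command,
 * a successful intermediate command leaves the request pending, and
 * otherwise the request's complete callback is looked up (in hdev->sent_cmd
 * or by draining the remaining queued commands of the aborted request) and
 * invoked with the status.
 */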
5288 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5289 {
5290 	hci_req_complete_t req_complete = NULL;
5291 	struct sk_buff *skb;
5292 	unsigned long flags;
5293 
5294 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5295 
5296 	/* If the completed command doesn't match the last one that was
5297 	 * sent we need to do special handling of it.
5298 	 */
5299 	if (!hci_sent_cmd_data(hdev, opcode)) {
5300 		/* Some CSR based controllers generate a spontaneous
5301 		 * reset complete event during init and any pending
5302 		 * command will never be completed. In such a case we
5303 		 * need to resend whatever was the last sent
5304 		 * command.
5305 		 */
5306 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5307 			hci_resend_last(hdev);
5308 
5309 		return;
5310 	}
5311 
5312 	/* If the command succeeded and there's still more commands in
5313 	 * this request the request is not yet complete.
5314 	 */
5315 	if (!status && !hci_req_is_complete(hdev))
5316 		return;
5317 
5318 	/* If this was the last command in a request the complete
5319 	 * callback would be found in hdev->sent_cmd instead of the
5320 	 * command queue (hdev->cmd_q).
5321 	 */
5322 	if (hdev->sent_cmd) {
5323 		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5324 
5325 		if (req_complete) {
5326 			/* We must set the complete callback to NULL to
5327 			 * avoid calling the callback more than once if
5328 			 * this function gets called again.
5329 			 */
5330 			bt_cb(hdev->sent_cmd)->req.complete = NULL;
5331 
5332 			goto call_complete;
5333 		}
5334 	}
5335 
5336 	/* Remove all pending commands belonging to this request */
5337 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5338 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5339 		if (bt_cb(skb)->req.start) {
5340 			__skb_queue_head(&hdev->cmd_q, skb);
5341 			break;
5342 		}
5343 
5344 		req_complete = bt_cb(skb)->req.complete;
5345 		kfree_skb(skb);
5346 	}
5347 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5348 
5349 call_complete:
5350 	if (req_complete)
5351 		req_complete(hdev, status);
5352 }
5353 
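/* Work item draining hdev->rx_q: every packet is mirrored to the monitor
 * (and to raw sockets in promiscuous mode), dropped entirely in user channel
 * mode, data packets are discarded while HCI_INIT is set, and the rest are
 * dispatched by packet type to the event, ACL or SCO handlers.
 */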
5354 static void hci_rx_work(struct work_struct *work)
5355 {
5356 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5357 	struct sk_buff *skb;
5358 
5359 	BT_DBG("%s", hdev->name);
5360 
5361 	while ((skb = skb_dequeue(&hdev->rx_q))) {
5362 		/* Send copy to monitor */
5363 		hci_send_to_monitor(hdev, skb);
5364 
5365 		if (atomic_read(&hdev->promisc)) {
5366 			/* Send copy to the sockets */
5367 			hci_send_to_sock(hdev, skb);
5368 		}
5369 
5370 		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5371 			kfree_skb(skb);
5372 			continue;
5373 		}
5374 
5375 		if (test_bit(HCI_INIT, &hdev->flags)) {
5376 			/* Don't process data packets in this state. */
5377 			switch (bt_cb(skb)->pkt_type) {
5378 			case HCI_ACLDATA_PKT:
5379 			case HCI_SCODATA_PKT:
5380 				kfree_skb(skb);
5381 				continue;
5382 			}
5383 		}
5384 
5385 		/* Process frame */
5386 		switch (bt_cb(skb)->pkt_type) {
5387 		case HCI_EVENT_PKT:
5388 			BT_DBG("%s Event packet", hdev->name);
5389 			hci_event_packet(hdev, skb);
5390 			break;
5391 
5392 		case HCI_ACLDATA_PKT:
5393 			BT_DBG("%s ACL data packet", hdev->name);
5394 			hci_acldata_packet(hdev, skb);
5395 			break;
5396 
5397 		case HCI_SCODATA_PKT:
5398 			BT_DBG("%s SCO data packet", hdev->name);
5399 			hci_scodata_packet(hdev, skb);
5400 			break;
5401 
5402 		default:
5403 			kfree_skb(skb);
5404 			break;
5405 		}
5406 	}
5407 }
5408 
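/* Work item draining hdev->cmd_q: commands are only sent while the
 * controller advertises credit in hdev->cmd_cnt. The command being sent is
 * cloned into hdev->sent_cmd for later completion matching and the command
 * timeout timer is (re)armed; if the clone fails the command is requeued.
 */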
5409 static void hci_cmd_work(struct work_struct *work)
5410 {
5411 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5412 	struct sk_buff *skb;
5413 
5414 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5415 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5416 
5417 	/* Send queued commands */
5418 	if (atomic_read(&hdev->cmd_cnt)) {
5419 		skb = skb_dequeue(&hdev->cmd_q);
5420 		if (!skb)
5421 			return;
5422 
5423 		kfree_skb(hdev->sent_cmd);
5424 
5425 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5426 		if (hdev->sent_cmd) {
5427 			atomic_dec(&hdev->cmd_cnt);
5428 			hci_send_frame(hdev, skb);
5429 			if (test_bit(HCI_RESET, &hdev->flags))
5430 				cancel_delayed_work(&hdev->cmd_timer);
5431 			else
5432 				schedule_delayed_work(&hdev->cmd_timer,
5433 						      HCI_CMD_TIMEOUT);
5434 		} else {
5435 			skb_queue_head(&hdev->cmd_q, skb);
5436 			queue_work(hdev->workqueue, &hdev->cmd_work);
5437 		}
5438 	}
5439 }
5440 
5441 void hci_req_add_le_scan_disable(struct hci_request *req)
5442 {
5443 	struct hci_cp_le_set_scan_enable cp;
5444 
5445 	memset(&cp, 0, sizeof(cp));
5446 	cp.enable = LE_SCAN_DISABLE;
5447 	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5448 }
5449 
5450 static void add_to_white_list(struct hci_request *req,
5451 			      struct hci_conn_params *params)
5452 {
5453 	struct hci_cp_le_add_to_white_list cp;
5454 
5455 	cp.bdaddr_type = params->addr_type;
5456 	bacpy(&cp.bdaddr, &params->addr);
5457 
5458 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5459 }
5460 
5461 static u8 update_white_list(struct hci_request *req)
5462 {
5463 	struct hci_dev *hdev = req->hdev;
5464 	struct hci_conn_params *params;
5465 	struct bdaddr_list *b;
5466 	uint8_t white_list_entries = 0;
5467 
5468 	/* Go through the current white list programmed into the
5469 	 * controller one by one and check if that address is still
5470 	 * in the list of pending connections or list of devices to
5471 	 * report. If not present in either list, then queue the
5472 	 * command to remove it from the controller.
5473 	 */
5474 	list_for_each_entry(b, &hdev->le_white_list, list) {
5475 		struct hci_cp_le_del_from_white_list cp;
5476 
5477 		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5478 					      &b->bdaddr, b->bdaddr_type) ||
5479 		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
5480 					      &b->bdaddr, b->bdaddr_type)) {
5481 			white_list_entries++;
5482 			continue;
5483 		}
5484 
5485 		cp.bdaddr_type = b->bdaddr_type;
5486 		bacpy(&cp.bdaddr, &b->bdaddr);
5487 
5488 		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5489 			    sizeof(cp), &cp);
5490 	}
5491 
5492 	/* Now that all stale white list entries have been removed,
5493 	 * walk through the list of pending connections
5494 	 * and ensure that any new device gets programmed into
5495 	 * the controller.
5496 	 *
5497 	 * If the list of the devices is larger than the list of
5498 	 * available white list entries in the controller, then
5499 	 * just abort and return a filter policy value that does not
5500 	 * use the white list.
5501 	 */
5502 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
5503 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5504 					   &params->addr, params->addr_type))
5505 			continue;
5506 
5507 		if (white_list_entries >= hdev->le_white_list_size) {
5508 			/* Select filter policy to accept all advertising */
5509 			return 0x00;
5510 		}
5511 
5512 		if (hci_find_irk_by_addr(hdev, &params->addr,
5513 					 params->addr_type)) {
5514 			/* White list can not be used with RPAs */
5515 			return 0x00;
5516 		}
5517 
5518 		white_list_entries++;
5519 		add_to_white_list(req, params);
5520 	}
5521 
5522 	/* After adding all new pending connections, walk through
5523 	 * the list of pending reports and also add these to the
5524 	 * white list if there is still space.
5525 	 */
5526 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
5527 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5528 					   &params->addr, params->addr_type))
5529 			continue;
5530 
5531 		if (white_list_entries >= hdev->le_white_list_size) {
5532 			/* Select filter policy to accept all advertising */
5533 			return 0x00;
5534 		}
5535 
5536 		if (hci_find_irk_by_addr(hdev, &params->addr,
5537 					 params->addr_type)) {
5538 			/* White list can not be used with RPAs */
5539 			return 0x00;
5540 		}
5541 
5542 		white_list_entries++;
5543 		add_to_white_list(req, params);
5544 	}
5545 
5546 	/* Select filter policy to use white list */
5547 	return 0x01;
5548 }
5549 
5550 void hci_req_add_le_passive_scan(struct hci_request *req)
5551 {
5552 	struct hci_cp_le_set_scan_param param_cp;
5553 	struct hci_cp_le_set_scan_enable enable_cp;
5554 	struct hci_dev *hdev = req->hdev;
5555 	u8 own_addr_type;
5556 	u8 filter_policy;
5557 
5558 	/* Set require_privacy to false since no SCAN_REQ is sent
5559 	 * during passive scanning. Not using an unresolvable address
5560 	 * here is important so that peer devices using direct
5561 	 * advertising with our address will be correctly reported
5562 	 * by the controller.
5563 	 */
5564 	if (hci_update_random_address(req, false, &own_addr_type))
5565 		return;
5566 
5567 	/* Adding or removing entries from the white list must
5568 	 * happen before enabling scanning. The controller does
5569 	 * not allow white list modification while scanning.
5570 	 */
5571 	filter_policy = update_white_list(req);
5572 
5573 	memset(&param_cp, 0, sizeof(param_cp));
5574 	param_cp.type = LE_SCAN_PASSIVE;
5575 	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5576 	param_cp.window = cpu_to_le16(hdev->le_scan_window);
5577 	param_cp.own_address_type = own_addr_type;
5578 	param_cp.filter_policy = filter_policy;
5579 	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5580 		    &param_cp);
5581 
5582 	memset(&enable_cp, 0, sizeof(enable_cp));
5583 	enable_cp.enable = LE_SCAN_ENABLE;
5584 	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5585 	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5586 		    &enable_cp);
5587 }
5588 
5589 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5590 {
5591 	if (status)
5592 		BT_DBG("HCI request failed to update background scanning: "
5593 		       "status 0x%2.2x", status);
5594 }
5595 
5596 /* This function controls the background scanning based on hdev->pend_le_conns
5597  * list. If there are pending LE connections we start the background scanning,
5598  * otherwise we stop it.
5599  *
5600  * This function requires that the caller holds hdev->lock.
5601  */
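/* A minimal usage sketch (illustrative only): callers take hdev->lock via
 * hci_dev_lock() before invoking this function, e.g.
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */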
5602 void hci_update_background_scan(struct hci_dev *hdev)
5603 {
5604 	struct hci_request req;
5605 	struct hci_conn *conn;
5606 	int err;
5607 
5608 	if (!test_bit(HCI_UP, &hdev->flags) ||
5609 	    test_bit(HCI_INIT, &hdev->flags) ||
5610 	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
5611 	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5612 	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5613 	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5614 		return;
5615 
5616 	/* No point in doing scanning if LE support hasn't been enabled */
5617 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5618 		return;
5619 
5620 	/* If discovery is active don't interfere with it */
5621 	if (hdev->discovery.state != DISCOVERY_STOPPED)
5622 		return;
5623 
5624 	hci_req_init(&req, hdev);
5625 
5626 	if (list_empty(&hdev->pend_le_conns) &&
5627 	    list_empty(&hdev->pend_le_reports)) {
5628 		/* If there are no pending LE connections or devices
5629 		 * to be scanned for, we should stop the background
5630 		 * scanning.
5631 		 */
5632 
5633 		/* If controller is not scanning we are done. */
5634 		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5635 			return;
5636 
5637 		hci_req_add_le_scan_disable(&req);
5638 
5639 		BT_DBG("%s stopping background scanning", hdev->name);
5640 	} else {
5641 		/* If there is at least one pending LE connection, we should
5642 		 * keep the background scan running.
5643 		 */
5644 
5645 		/* If controller is connecting, we should not start scanning
5646 		 * since some controllers are not able to scan and connect at
5647 		 * the same time.
5648 		 */
5649 		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5650 		if (conn)
5651 			return;
5652 
5653 		/* If controller is currently scanning, we stop it to ensure we
5654 		 * don't miss any advertising (due to the duplicates filter).
5655 		 */
5656 		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5657 			hci_req_add_le_scan_disable(&req);
5658 
5659 		hci_req_add_le_passive_scan(&req);
5660 
5661 		BT_DBG("%s starting background scanning", hdev->name);
5662 	}
5663 
5664 	err = hci_req_run(&req, update_background_scan_complete);
5665 	if (err)
5666 		BT_ERR("Failed to run HCI request: err %d", err);
5667 }
5668 
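/* Return true if any BR/EDR whitelist entry is not currently connected
 * (or its connection is not yet fully established); in that case page
 * scanning must stay enabled so those devices can reconnect.
 */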
5669 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5670 {
5671 	struct bdaddr_list *b;
5672 
5673 	list_for_each_entry(b, &hdev->whitelist, list) {
5674 		struct hci_conn *conn;
5675 
5676 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5677 		if (!conn)
5678 			return true;
5679 
5680 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5681 			return true;
5682 	}
5683 
5684 	return false;
5685 }
5686 
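/* Re-evaluate whether page scan (plus inquiry scan when discoverable) should
 * be enabled, based on the connectable setting and on whitelist entries that
 * are waiting to reconnect. The command is appended to @req when one is
 * given, otherwise it is sent directly to the controller.
 */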
5687 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5688 {
5689 	u8 scan;
5690 
5691 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5692 		return;
5693 
5694 	if (!hdev_is_powered(hdev))
5695 		return;
5696 
5697 	if (mgmt_powering_down(hdev))
5698 		return;
5699 
5700 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5701 	    disconnected_whitelist_entries(hdev))
5702 		scan = SCAN_PAGE;
5703 	else
5704 		scan = SCAN_DISABLED;
5705 
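	/* Nothing to do if page scan is already in the requested state */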
5706 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5707 		return;
5708 
5709 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5710 		scan |= SCAN_INQUIRY;
5711 
5712 	if (req)
5713 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5714 	else
5715 		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5716 }
5717