• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 
43 /* Handle HCI Event packets */
44 
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * @new_status: out parameter; receives the (possibly overridden) status
 *              that the generic command-complete path should report.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	/* Clear the flag first, then wake any waiter sleeping on it; the
	 * barrier keeps the clear visible before the wakeup.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* A connection attempt may have been held back by the inquiry. */
	hci_conn_check_pending(hdev);
}
84 
hci_cc_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)85 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 	__u8 status = *((__u8 *) skb->data);
88 
89 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 
91 	if (status)
92 		return;
93 
94 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
95 }
96 
hci_cc_exit_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)97 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 {
99 	__u8 status = *((__u8 *) skb->data);
100 
101 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
102 
103 	if (status)
104 		return;
105 
106 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 
108 	hci_conn_check_pending(hdev);
109 }
110 
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * No local state to update here; only log that the reply arrived.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
116 
hci_cc_role_discovery(struct hci_dev * hdev,struct sk_buff * skb)117 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 {
119 	struct hci_rp_role_discovery *rp = (void *) skb->data;
120 	struct hci_conn *conn;
121 
122 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123 
124 	if (rp->status)
125 		return;
126 
127 	hci_dev_lock(hdev);
128 
129 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 	if (conn)
131 		conn->role = rp->role;
132 
133 	hci_dev_unlock(hdev);
134 }
135 
hci_cc_read_link_policy(struct hci_dev * hdev,struct sk_buff * skb)136 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 {
138 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
139 	struct hci_conn *conn;
140 
141 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142 
143 	if (rp->status)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = __le16_to_cpu(rp->policy);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
hci_cc_write_link_policy(struct hci_dev * hdev,struct sk_buff * skb)155 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
158 	struct hci_conn *conn;
159 	void *sent;
160 
161 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 
163 	if (rp->status)
164 		return;
165 
166 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
167 	if (!sent)
168 		return;
169 
170 	hci_dev_lock(hdev);
171 
172 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 	if (conn)
174 		conn->link_policy = get_unaligned_le16(sent + 2);
175 
176 	hci_dev_unlock(hdev);
177 }
178 
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)179 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
180 					struct sk_buff *skb)
181 {
182 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 
184 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
185 
186 	if (rp->status)
187 		return;
188 
189 	hdev->link_policy = __le16_to_cpu(rp->policy);
190 }
191 
hci_cc_write_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)192 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
193 					 struct sk_buff *skb)
194 {
195 	__u8 status = *((__u8 *) skb->data);
196 	void *sent;
197 
198 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
199 
200 	if (status)
201 		return;
202 
203 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
204 	if (!sent)
205 		return;
206 
207 	hdev->link_policy = get_unaligned_le16(sent);
208 }
209 
/* Command Complete handler for HCI_Reset: drop all volatile state so
 * the stack's view matches the freshly reset controller.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset attempt is finished either way; allow a new one. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* Cached TX power readings must be re-read after a reset. */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data do not survive the reset. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller-side LE lists are emptied by the reset too. */
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
242 
/* Command Complete handler for HCI_Read_Stored_Link_Key: cache the
 * controller's key-storage totals when we asked for all keys.
 */
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	/* Only meaningful when the command requested all keys. */
	if (!rp->status && sent->read_all == 0x01) {
		/* NOTE(review): rp->max_keys / rp->num_keys are stored
		 * without an endian conversion — confirm these reply
		 * fields are single bytes in this tree; if they are
		 * __le16 this needs le16_to_cpu().
		 */
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}
260 
/* Command Complete handler for HCI_Delete_Stored_Link_Key: decrement
 * the cached stored-key count by the number of keys deleted.
 */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Clamp at zero so the counter can never underflow.
	 * NOTE(review): rp->num_keys is used without an endian
	 * conversion — confirm the field width in this tree.
	 */
	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}
276 
/* Command Complete handler for HCI_Write_Local_Name. */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* mgmt is notified of the outcome even on failure; without mgmt
	 * the name is only mirrored locally when the command succeeded.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
297 
hci_cc_read_local_name(struct hci_dev * hdev,struct sk_buff * skb)298 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299 {
300 	struct hci_rp_read_local_name *rp = (void *) skb->data;
301 
302 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
303 
304 	if (rp->status)
305 		return;
306 
307 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
308 	    hci_dev_test_flag(hdev, HCI_CONFIG))
309 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310 }
311 
/* Command Complete handler for HCI_Write_Authentication_Enable. */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Track the requested authentication mode in hdev->flags, but
	 * only when the controller accepted the command.
	 */
	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt learns about the outcome regardless of success. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
339 
hci_cc_write_encrypt_mode(struct hci_dev * hdev,struct sk_buff * skb)340 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341 {
342 	__u8 status = *((__u8 *) skb->data);
343 	__u8 param;
344 	void *sent;
345 
346 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
347 
348 	if (status)
349 		return;
350 
351 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
352 	if (!sent)
353 		return;
354 
355 	param = *((__u8 *) sent);
356 
357 	if (param)
358 		set_bit(HCI_ENCRYPT, &hdev->flags);
359 	else
360 		clear_bit(HCI_ENCRYPT, &hdev->flags);
361 }
362 
/* Command Complete handler for HCI_Write_Scan_Enable: mirror the
 * requested inquiry/page scan bits into hdev->flags.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		/* Command failed: forget any pending discoverable timeout. */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
397 
/* Command Complete handler for HCI_Read_Class_of_Device: cache the
 * 3-byte Class of Device value.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Byte 2 is the most significant part of the class value. */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
412 
/* Command Complete handler for HCI_Write_Class_of_Device. */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Only mirror the class locally on success ... */
	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	/* ... but mgmt is always told the outcome. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
434 
hci_cc_read_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)435 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
436 {
437 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
438 	__u16 setting;
439 
440 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
441 
442 	if (rp->status)
443 		return;
444 
445 	setting = __le16_to_cpu(rp->voice_setting);
446 
447 	if (hdev->voice_setting == setting)
448 		return;
449 
450 	hdev->voice_setting = setting;
451 
452 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
453 
454 	if (hdev->notify)
455 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
456 }
457 
hci_cc_write_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)458 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
459 				       struct sk_buff *skb)
460 {
461 	__u8 status = *((__u8 *) skb->data);
462 	__u16 setting;
463 	void *sent;
464 
465 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
466 
467 	if (status)
468 		return;
469 
470 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
471 	if (!sent)
472 		return;
473 
474 	setting = get_unaligned_le16(sent);
475 
476 	if (hdev->voice_setting == setting)
477 		return;
478 
479 	hdev->voice_setting = setting;
480 
481 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 
483 	if (hdev->notify)
484 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
485 }
486 
hci_cc_read_num_supported_iac(struct hci_dev * hdev,struct sk_buff * skb)487 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
488 					  struct sk_buff *skb)
489 {
490 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
491 
492 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
493 
494 	if (rp->status)
495 		return;
496 
497 	hdev->num_iac = rp->num_iac;
498 
499 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
500 }
501 
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode. */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Keep the host-features bit in sync with the requested mode. */
	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	/* With mgmt present, it owns the SSP flag; otherwise update the
	 * flag directly on success.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
533 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support. */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Keep the host-features bit in sync with the requested support. */
	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* Without mgmt, also update the SC flag directly on success. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
563 
hci_cc_read_local_version(struct hci_dev * hdev,struct sk_buff * skb)564 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
565 {
566 	struct hci_rp_read_local_version *rp = (void *) skb->data;
567 
568 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
569 
570 	if (rp->status)
571 		return;
572 
573 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
574 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
575 		hdev->hci_ver = rp->hci_ver;
576 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
577 		hdev->lmp_ver = rp->lmp_ver;
578 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
579 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
580 	}
581 }
582 
hci_cc_read_local_commands(struct hci_dev * hdev,struct sk_buff * skb)583 static void hci_cc_read_local_commands(struct hci_dev *hdev,
584 				       struct sk_buff *skb)
585 {
586 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
587 
588 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
589 
590 	if (rp->status)
591 		return;
592 
593 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
594 	    hci_dev_test_flag(hdev, HCI_CONFIG))
595 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
596 }
597 
hci_cc_read_auth_payload_timeout(struct hci_dev * hdev,struct sk_buff * skb)598 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
599 					     struct sk_buff *skb)
600 {
601 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
602 	struct hci_conn *conn;
603 
604 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
605 
606 	if (rp->status)
607 		return;
608 
609 	hci_dev_lock(hdev);
610 
611 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
612 	if (conn)
613 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
614 
615 	hci_dev_unlock(hdev);
616 }
617 
hci_cc_write_auth_payload_timeout(struct hci_dev * hdev,struct sk_buff * skb)618 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
619 					      struct sk_buff *skb)
620 {
621 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
622 	struct hci_conn *conn;
623 	void *sent;
624 
625 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
626 
627 	if (rp->status)
628 		return;
629 
630 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
631 	if (!sent)
632 		return;
633 
634 	hci_dev_lock(hdev);
635 
636 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
637 	if (conn)
638 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
639 
640 	hci_dev_unlock(hdev);
641 }
642 
/* Command Complete handler for HCI_Read_Local_Supported_Features:
 * cache page 0 of the feature bitmap and derive the ACL/SCO/eSCO
 * packet types the controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO HV packet types. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types (2 Mb/s and 3 Mb/s, plus 3-slot). */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
692 
hci_cc_read_local_ext_features(struct hci_dev * hdev,struct sk_buff * skb)693 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
694 					   struct sk_buff *skb)
695 {
696 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
697 
698 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
699 
700 	if (rp->status)
701 		return;
702 
703 	if (hdev->max_page < rp->max_page)
704 		hdev->max_page = rp->max_page;
705 
706 	if (rp->page < HCI_MAX_PAGES)
707 		memcpy(hdev->features[rp->page], rp->features, 8);
708 }
709 
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)710 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
711 					  struct sk_buff *skb)
712 {
713 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
714 
715 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716 
717 	if (rp->status)
718 		return;
719 
720 	hdev->flow_ctl_mode = rp->mode;
721 }
722 
/* Command Complete handler for HCI_Read_Buffer_Size: cache the ACL and
 * SCO buffer geometry and initialize the outstanding-packet credits.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer sizes; the quirk
	 * replaces them with known-good values.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Start out with the full credit count available. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
748 
/* Command Complete handler for HCI_Read_BD_ADDR. */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Only adopt the public address while initializing. */
	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	/* During setup, also remember it as the original address. */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
764 
hci_cc_read_local_pairing_opts(struct hci_dev * hdev,struct sk_buff * skb)765 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
766 					   struct sk_buff *skb)
767 {
768 	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
769 
770 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
771 
772 	if (rp->status)
773 		return;
774 
775 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
776 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
777 		hdev->pairing_opts = rp->pairing_opts;
778 		hdev->max_enc_key_size = rp->max_key_size;
779 	}
780 }
781 
hci_cc_read_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)782 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
783 					   struct sk_buff *skb)
784 {
785 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
786 
787 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
788 
789 	if (rp->status)
790 		return;
791 
792 	if (test_bit(HCI_INIT, &hdev->flags)) {
793 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
794 		hdev->page_scan_window = __le16_to_cpu(rp->window);
795 	}
796 }
797 
hci_cc_write_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)798 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
799 					    struct sk_buff *skb)
800 {
801 	u8 status = *((u8 *) skb->data);
802 	struct hci_cp_write_page_scan_activity *sent;
803 
804 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
805 
806 	if (status)
807 		return;
808 
809 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
810 	if (!sent)
811 		return;
812 
813 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
814 	hdev->page_scan_window = __le16_to_cpu(sent->window);
815 }
816 
hci_cc_read_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)817 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
818 					   struct sk_buff *skb)
819 {
820 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
821 
822 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823 
824 	if (rp->status)
825 		return;
826 
827 	if (test_bit(HCI_INIT, &hdev->flags))
828 		hdev->page_scan_type = rp->type;
829 }
830 
hci_cc_write_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)831 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
832 					struct sk_buff *skb)
833 {
834 	u8 status = *((u8 *) skb->data);
835 	u8 *type;
836 
837 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
838 
839 	if (status)
840 		return;
841 
842 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
843 	if (type)
844 		hdev->page_scan_type = *type;
845 }
846 
/* Command Complete handler for HCI_Read_Data_Block_Size: cache the
 * block-based flow control parameters and start with all blocks free.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks are available initially. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
866 
/* Command Complete handler for HCI_Read_Clock: store either the local
 * clock (on hdev) or a connection's piconet clock, depending on what
 * the original command asked for.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before reading rp fields. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 means the local clock was requested. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise it is the piconet clock of a specific connection. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
901 
/* Command Complete handler for HCI_Read_Local_AMP_Info: cache the AMP
 * controller's capability parameters.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
923 
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)924 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
925 					 struct sk_buff *skb)
926 {
927 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
928 
929 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 
931 	if (rp->status)
932 		return;
933 
934 	hdev->inq_tx_power = rp->tx_power;
935 }
936 
hci_cc_read_def_err_data_reporting(struct hci_dev * hdev,struct sk_buff * skb)937 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
938 					       struct sk_buff *skb)
939 {
940 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
941 
942 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 
944 	if (rp->status)
945 		return;
946 
947 	hdev->err_data_reporting = rp->err_data_reporting;
948 }
949 
hci_cc_write_def_err_data_reporting(struct hci_dev * hdev,struct sk_buff * skb)950 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
951 						struct sk_buff *skb)
952 {
953 	__u8 status = *((__u8 *)skb->data);
954 	struct hci_cp_write_def_err_data_reporting *cp;
955 
956 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
957 
958 	if (status)
959 		return;
960 
961 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
962 	if (!cp)
963 		return;
964 
965 	hdev->err_data_reporting = cp->err_data_reporting;
966 }
967 
/* Command Complete handler for HCI_PIN_Code_Request_Reply. */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt is told the outcome regardless of success. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	/* Remember the PIN length on the matching ACL connection. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
995 
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * forward the result to mgmt when it is in use.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
1010 
/* Command Complete handler for HCI_LE_Read_Buffer_Size: cache the LE
 * ACL buffer geometry and initialize the LE packet credits.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* Start with the full credit count available. */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
1028 
hci_cc_le_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)1029 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1030 					  struct sk_buff *skb)
1031 {
1032 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1033 
1034 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1035 
1036 	if (rp->status)
1037 		return;
1038 
1039 	memcpy(hdev->le_features, rp->features, 8);
1040 }
1041 
/* Command Complete handler for HCI_OP_LE_READ_ADV_TX_POWER.
 *
 * Caches the controller's advertising transmit power level.
 */
static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}
1054 
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 *
 * Reports the reply's status to mgmt so a pending user-confirmation
 * request can be completed.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
1069 
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 *
 * Reports the negative reply's status to mgmt so a pending
 * user-confirmation rejection can be completed.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1085 
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 *
 * Reports the reply's status to mgmt so a pending user-passkey
 * request can be completed.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1100 
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 *
 * Reports the negative reply's status to mgmt so a pending
 * user-passkey rejection can be completed.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1116 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Only logs the status here; the reply payload itself is not consumed
 * by this handler.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1124 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Only logs the status here; the reply payload itself is not consumed
 * by this handler.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1132 
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, records the random address that was sent to the
 * controller in hdev->random_addr.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the address from the command we originally sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1153 
/* Command Complete handler for HCI_OP_LE_SET_DEFAULT_PHY.
 *
 * On success, caches the default TX/RX PHY preferences that were sent
 * to the controller.
 */
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters from the command we originally sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}
1175 
hci_cc_le_set_adv_set_random_addr(struct hci_dev * hdev,struct sk_buff * skb)1176 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1177                                               struct sk_buff *skb)
1178 {
1179 	__u8 status = *((__u8 *) skb->data);
1180 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1181 	struct adv_info *adv_instance;
1182 
1183 	if (status)
1184 		return;
1185 
1186 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1187 	if (!cp)
1188 		return;
1189 
1190 	hci_dev_lock(hdev);
1191 
1192 	if (!hdev->cur_adv_instance) {
1193 		/* Store in hdev for instance 0 (Set adv and Directed advs) */
1194 		bacpy(&hdev->random_addr, &cp->bdaddr);
1195 	} else {
1196 		adv_instance = hci_find_adv_instance(hdev,
1197 						     hdev->cur_adv_instance);
1198 		if (adv_instance)
1199 			bacpy(&adv_instance->random_addr, &cp->bdaddr);
1200 	}
1201 
1202 	hci_dev_unlock(hdev);
1203 }
1204 
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks the HCI_LE_ADV flag and, when advertising was enabled while a
 * LE connection attempt is pending, arms the connection timeout.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The enable/disable value comes from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1239 
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Extended-advertising counterpart of hci_cc_le_set_adv_enable():
 * tracks HCI_LE_ADV and arms the LE connection timeout if a connection
 * attempt is pending while advertising was enabled.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1273 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, caches the scan type (active/passive) that was sent to
 * the controller.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1294 
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 *
 * Caches the scan type from the first per-PHY parameter block of the
 * command we sent.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* NOTE(review): assumes the command carried at least one PHY
	 * parameter entry — confirm against the request builder.
	 */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1319 
has_pending_adv_report(struct hci_dev * hdev)1320 static bool has_pending_adv_report(struct hci_dev *hdev)
1321 {
1322 	struct discovery_state *d = &hdev->discovery;
1323 
1324 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1325 }
1326 
clear_pending_adv_report(struct hci_dev * hdev)1327 static void clear_pending_adv_report(struct hci_dev *hdev)
1328 {
1329 	struct discovery_state *d = &hdev->discovery;
1330 
1331 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1332 	d->last_adv_data_len = 0;
1333 }
1334 
/* Buffer one advertising report in the discovery state so it can be
 * delivered later (e.g. merged with a matching scan response).
 * Oversized payloads are dropped rather than truncated.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Never overflow last_adv_data */
	if (len > HCI_MAX_AD_LENGTH)
		return;

	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	d->last_adv_data_len = len;
	memcpy(d->last_adv_data, data, len);
	bacpy(&d->last_adv_addr, bdaddr);
}
1351 
/* Shared completion logic for the legacy and extended scan-enable
 * commands: updates the HCI_LE_SCAN flag, flushes or delivers any
 * buffered advertising report and reconciles the discovery state.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* An active scan expects scan responses, so any report
		 * buffered from before is stale.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1408 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On success, feeds the enable value from the sent command into the
 * shared le_set_scan_enable_complete() logic.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1426 
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE.
 *
 * Extended-scanning counterpart of hci_cc_le_set_scan_enable(); shares
 * the same completion logic.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1444 
/* Command Complete handler for HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS.
 *
 * Caches how many advertising sets the controller supports.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
1458 
/* Command Complete handler for HCI_OP_LE_READ_WHITE_LIST_SIZE.
 *
 * Caches the controller's white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1471 
/* Command Complete handler for HCI_OP_LE_CLEAR_WHITE_LIST.
 *
 * On success, drops the host's mirror of the controller white list so
 * the two stay in sync.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
1484 
/* Command Complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, mirrors the added entry into the host's copy of the
 * controller white list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
}
1503 
/* Command Complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, removes the entry from the host's copy of the controller
 * white list.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1522 
hci_cc_le_read_supported_states(struct hci_dev * hdev,struct sk_buff * skb)1523 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1524 					    struct sk_buff *skb)
1525 {
1526 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1527 
1528 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1529 
1530 	if (rp->status)
1531 		return;
1532 
1533 	memcpy(hdev->le_states, rp->le_states, 8);
1534 }
1535 
/* Command Complete handler for HCI_OP_LE_READ_DEF_DATA_LEN.
 *
 * Caches the controller's suggested default LE data length and time.
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
1549 
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, caches the default LE data length and time that were
 * written to the controller.
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1568 
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirrors the added entry (address plus peer/local IRKs)
 * into the host's copy of the controller resolving list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
}
1588 
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, removes the entry from the host's copy of the controller
 * resolving list.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1607 
/* Command Complete handler for HCI_OP_LE_CLEAR_RESOLV_LIST.
 *
 * On success, drops the host's mirror of the controller resolving
 * list so the two stay in sync.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
1620 
/* Command Complete handler for HCI_OP_LE_READ_RESOLV_LIST_SIZE.
 *
 * Caches the controller's resolving list capacity.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
1633 
/* Command Complete handler for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * Tracks the HCI_LL_RPA_RESOLUTION flag according to the enable value
 * that was sent to the controller.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1657 
/* Command Complete handler for HCI_OP_LE_READ_MAX_DATA_LEN.
 *
 * Caches the controller's maximum LE TX/RX data length and time.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
1673 
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Updates the host-feature bits (LE supported / simultaneous
 * LE+BR/EDR) and the HCI_LE_ENABLED flag to match the values that
 * were written to the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates any advertising state */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1707 
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, caches the own-address type that was configured for
 * advertising.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1726 
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * Caches the own-address type and the selected TX power reported by
 * the controller (per advertising instance), then refreshes the
 * advertising data which may embed the TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1757 
/* Command Complete handler for HCI_OP_READ_RSSI.
 *
 * Stores the reported RSSI on the connection identified by the reply's
 * handle, if that connection still exists.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1776 
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * Stores the reported power on the connection: the sent command's type
 * field selects current (0x00) vs maximum (0x01) TX power; any other
 * type is ignored.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1810 
/* Command Complete handler for HCI_OP_WRITE_SSP_DEBUG_MODE.
 *
 * On success, caches the SSP debug mode value that was written.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
1825 
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, re-checks pending connection attempts that were waiting
 * for the inquiry; on success, marks the device as inquiring.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
1837 
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tears the pending connection down — except for error
 * 0x0c (Command Disallowed) with fewer than three attempts, where the
 * connection is parked in BT_CONNECT2 for a retry.  On success, makes
 * sure a conn object exists for the outgoing ACL link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1875 
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: the SCO link attached to the ACL
 * connection the command referenced is closed and deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled via the SCO connect complete event */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1910 
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures need handling: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1937 
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only failures need handling: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1964 
hci_outgoing_auth_needed(struct hci_dev * hdev,struct hci_conn * conn)1965 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1966 				    struct hci_conn *conn)
1967 {
1968 	if (conn->state != BT_CONFIG || !conn->out)
1969 		return 0;
1970 
1971 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1972 		return 0;
1973 
1974 	/* Only request authentication for SSP connections or non-SSP
1975 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1976 	 * is requested.
1977 	 */
1978 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1979 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1980 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1981 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1982 		return 0;
1983 
1984 	return 1;
1985 }
1986 
hci_resolve_name(struct hci_dev * hdev,struct inquiry_entry * e)1987 static int hci_resolve_name(struct hci_dev *hdev,
1988 				   struct inquiry_entry *e)
1989 {
1990 	struct hci_cp_remote_name_req cp;
1991 
1992 	memset(&cp, 0, sizeof(cp));
1993 
1994 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1995 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1996 	cp.pscan_mode = e->data.pscan_mode;
1997 	cp.clock_offset = e->data.clock_offset;
1998 
1999 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2000 }
2001 
hci_resolve_next_name(struct hci_dev * hdev)2002 static bool hci_resolve_next_name(struct hci_dev *hdev)
2003 {
2004 	struct discovery_state *discov = &hdev->discovery;
2005 	struct inquiry_entry *e;
2006 
2007 	if (list_empty(&discov->resolve))
2008 		return false;
2009 
2010 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2011 	if (!e)
2012 		return false;
2013 
2014 	if (hci_resolve_name(hdev, e) == 0) {
2015 		e->name_state = NAME_PENDING;
2016 		return true;
2017 	}
2018 
2019 	return false;
2020 }
2021 
/* Process a completed (or failed) remote-name lookup: update the mgmt
 * connected state, record the name on the inquiry-cache entry, and
 * either continue resolving the next name or finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	/* A NULL name means the lookup failed */
	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2070 
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: the pending-name bookkeeping is
 * updated (with a NULL name) and, if the connection still needs
 * authentication, an Authentication Requested command is issued.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only start authentication if none is pending already */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2113 
/* Handle command status for HCI_Read_Remote_Supported_Features.
 *
 * On success the Remote Features Complete event carries on; only a
 * failure is handled here by failing a connection still in BT_CONFIG.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2140 
/* Handle command status for HCI_Read_Remote_Extended_Features.
 *
 * Mirrors hci_cs_read_remote_features: only a failure is acted upon,
 * by failing a connection that is still being configured.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2167 
/* Handle command status for HCI_Setup_Synchronous_Connection.
 *
 * On failure, close and delete the SCO connection linked to the ACL
 * connection the command was issued for.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *sent;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!sent)
		return;

	handle = __le16_to_cpu(sent->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	sco = acl ? acl->link : NULL;
	if (sco) {
		sco->state = BT_CLOSED;

		hci_connect_cfm(sco, status);
		hci_conn_del(sco);
	}

	hci_dev_unlock(hdev);
}
2202 
/* Handle command status for HCI_Sniff_Mode.
 *
 * On failure, clear the pending mode-change flag and continue any SCO
 * setup that was waiting on the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (!conn)
		goto unlock;

	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
2229 
/* Handle command status for HCI_Exit_Sniff_Mode.
 *
 * Counterpart of hci_cs_sniff_mode: on failure, clear the pending
 * mode-change flag and continue any deferred SCO setup.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (!conn)
		goto unlock;

	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
2256 
/* Handle command status for HCI_Disconnect.
 *
 * Only failures are processed: report the failed disconnect to mgmt,
 * delete the connection object and, for LE links, re-enable
 * advertising that was suppressed by the connection.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		/* Save the type before hci_conn_del() frees the conn */
		u8 type = conn->type;

		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
		if (type == LE_LINK)
			hci_req_reenable_advertising(hdev);
	}

	hci_dev_unlock(hdev);
}
2290 
/* Common command-status handling for LE Create Connection and
 * LE Extended Create Connection.
 *
 * Records the initiator/responder address information on the conn
 * object for later use by SMP, and arms a connection timeout for
 * direct (non-allowlist) connection attempts. Called with the hdev
 * lock held by the hci_cs_le_*_create_conn callers.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2341 
/* Handle command status for HCI_LE_Create_Connection.
 *
 * Failures are dealt with by hci_le_conn_failed through the request
 * completion callbacks, so only a successful status is acted on here.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &sent->peer_addr, sent->peer_addr_type,
			  sent->own_address_type, sent->filter_policy);

	hci_dev_unlock(hdev);
}
2366 
/* Handle command status for HCI_LE_Extended_Create_Connection.
 *
 * Failures are dealt with by hci_le_conn_failed through the request
 * completion callbacks, so only a successful status is acted on here.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &sent->peer_addr, sent->peer_addr_type,
			  sent->own_addr_type, sent->filter_policy);

	hci_dev_unlock(hdev);
}
2391 
/* Handle command status for HCI_LE_Read_Remote_Features.
 *
 * On failure, a connection still in BT_CONFIG is failed via
 * hci_connect_cfm and its reference dropped.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(sent->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2418 
/* Handle command status for HCI_LE_Start_Encryption.
 *
 * A failed start-encryption on an established LE link is treated as an
 * authentication failure: the link is disconnected.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act if the link is still up */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2448 
/* Handle command status for HCI_Switch_Role.
 *
 * On failure, clear the pending role-switch flag so a later switch
 * attempt is not blocked.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *sent;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &sent->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
2471 
hci_inquiry_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2472 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2473 {
2474 	__u8 status = *((__u8 *) skb->data);
2475 	struct discovery_state *discov = &hdev->discovery;
2476 	struct inquiry_entry *e;
2477 
2478 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2479 
2480 	hci_conn_check_pending(hdev);
2481 
2482 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2483 		return;
2484 
2485 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2486 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2487 
2488 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2489 		return;
2490 
2491 	hci_dev_lock(hdev);
2492 
2493 	if (discov->state != DISCOVERY_FINDING)
2494 		goto unlock;
2495 
2496 	if (list_empty(&discov->resolve)) {
2497 		/* When BR/EDR inquiry is active and no LE scanning is in
2498 		 * progress, then change discovery state to indicate completion.
2499 		 *
2500 		 * When running LE scanning and BR/EDR inquiry simultaneously
2501 		 * and the LE scan already finished, then change the discovery
2502 		 * state to indicate completion.
2503 		 */
2504 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2505 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2506 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2507 		goto unlock;
2508 	}
2509 
2510 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2511 	if (e && hci_resolve_name(hdev, e) == 0) {
2512 		e->name_state = NAME_PENDING;
2513 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2514 	} else {
2515 		/* When BR/EDR inquiry is active and no LE scanning is in
2516 		 * progress, then change discovery state to indicate completion.
2517 		 *
2518 		 * When running LE scanning and BR/EDR inquiry simultaneously
2519 		 * and the LE scan already finished, then change the discovery
2520 		 * state to indicate completion.
2521 		 */
2522 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2523 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2524 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2525 	}
2526 
2527 unlock:
2528 	hci_dev_unlock(hdev);
2529 }
2530 
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)2531 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2532 {
2533 	struct inquiry_data data;
2534 	struct inquiry_info *info = (void *) (skb->data + 1);
2535 	int num_rsp = *((__u8 *) skb->data);
2536 
2537 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2538 
2539 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2540 		return;
2541 
2542 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2543 		return;
2544 
2545 	hci_dev_lock(hdev);
2546 
2547 	for (; num_rsp; num_rsp--, info++) {
2548 		u32 flags;
2549 
2550 		bacpy(&data.bdaddr, &info->bdaddr);
2551 		data.pscan_rep_mode	= info->pscan_rep_mode;
2552 		data.pscan_period_mode	= info->pscan_period_mode;
2553 		data.pscan_mode		= info->pscan_mode;
2554 		memcpy(data.dev_class, info->dev_class, 3);
2555 		data.clock_offset	= info->clock_offset;
2556 		data.rssi		= HCI_RSSI_INVALID;
2557 		data.ssp_mode		= 0x00;
2558 
2559 		flags = hci_inquiry_cache_update(hdev, &data, false);
2560 
2561 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2562 				  info->dev_class, HCI_RSSI_INVALID,
2563 				  flags, NULL, 0, NULL, 0);
2564 	}
2565 
2566 	hci_dev_unlock(hdev);
2567 }
2568 
/* Handle the HCI Connection Complete event.
 *
 * Looks up (or, in limited cases, creates) the hci_conn for the
 * reported address, finalizes its state on success (handle, timeouts,
 * remote feature read, packet type) or tears it down on failure, and
 * notifies the upper layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO event may complete an eSCO attempt; any
			 * other unknown link type is ignored.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			/* Controller fell back to SCO */
			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give incoming legacy (non-SSP) links without a
			 * stored link key a longer grace period for pairing.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Notify upper layers, then free the conn object */
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2683 
/* Reject an incoming connection request from @bdaddr with reason
 * "unacceptable BD_ADDR".
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp = {
		.reason = HCI_ERROR_REJ_BAD_ADDR,
	};

	bacpy(&cp.bdaddr, bdaddr);
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2692 
/* Handle the HCI Connection Request event.
 *
 * Rejects requests that the protocols, the reject list or the mgmt
 * connectable policy disallow; otherwise accepts the connection
 * (ACL/SCO/eSCO) or defers acceptance to the upper layer when it asked
 * for deferral.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Reject devices on the reject list */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached class of device if the peer is known */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL links — and sync links when neither deferral nor eSCO is
	 * available — are accepted with a plain Accept Connection Request.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept an eSCO-capable sync connection */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Upper layer asked to defer; it will accept or reject */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2787 
/* Map an HCI disconnect reason to the corresponding mgmt reason code. */
static u8 hci_to_mgmt_reason(u8 err)
{
	if (err == HCI_ERROR_CONNECTION_TIMEOUT)
		return MGMT_DEV_DISCONN_TIMEOUT;

	if (err == HCI_ERROR_REMOTE_USER_TERM ||
	    err == HCI_ERROR_REMOTE_LOW_RESOURCES ||
	    err == HCI_ERROR_REMOTE_POWER_OFF)
		return MGMT_DEV_DISCONN_REMOTE;

	if (err == HCI_ERROR_LOCAL_HOST_TERM)
		return MGMT_DEV_DISCONN_LOCAL_HOST;

	return MGMT_DEV_DISCONN_UNKNOWN;
}
2803 
/* Handle the HCI Disconnection Complete event.
 *
 * Reports the disconnect (or failed disconnect) to mgmt, updates link
 * keys and scan state for ACL links, re-arms auto-connections for the
 * peer where configured, deletes the connection and re-enables LE
 * advertising or wakes the suspend waiter as needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A previous auth failure overrides the HCI-provided reason */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue an auto-connection for this peer if one is configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Save the type before hci_conn_del() frees the conn */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2895 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's auth state, continues the connection setup
 * (SSP links proceed to encryption, others become connected), and
 * starts or fails any pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (non-SSP) links cannot be re-authenticated; keep
		 * the current security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, an SSP link moves on to encryption; any
		 * other outcome completes the connection attempt.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service a pending encryption request, or fail it on auth error */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2965 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the pending-name state
 * machine and, if the connection still needs it, starts outgoing
 * authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* Pass NULL on failure so the entry is marked name-not-known */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3007 
/* Completion callback for HCI_Read_Encryption_Key_Size.
 *
 * Stores the reported key size on the connection (zero when the read
 * failed, which forces a disconnect) and then notifies the encryption
 * state change.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	conn->enc_key_size = rp->key_size;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
3048 
/* Handle HCI Encryption Change event.
 *
 * Mirrors the controller's encryption state into the hci_conn flags,
 * tears the link down on encryption failure, and issues follow-up
 * commands (encryption key size read, authenticated payload timeout)
 * once encryption is up.  Note that ev->status may be overwritten
 * locally when the link mode check fails.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR, and any encrypted LE
			 * link, is flagged as AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3162 
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)3163 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3164 					     struct sk_buff *skb)
3165 {
3166 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3167 	struct hci_conn *conn;
3168 
3169 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3170 
3171 	hci_dev_lock(hdev);
3172 
3173 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3174 	if (conn) {
3175 		if (!ev->status)
3176 			set_bit(HCI_CONN_SECURE, &conn->flags);
3177 
3178 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3179 
3180 		hci_key_change_cfm(conn, ev->status);
3181 	}
3182 
3183 	hci_dev_unlock(hdev);
3184 }
3185 
/* Handle HCI Read Remote Supported Features Complete event: cache the
 * remote feature page and continue connection setup (extended features
 * query, remote name request, authentication) while the link is still
 * in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only drive further setup while the connection is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 before
	 * finishing setup; the rest continues from that event's handler.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before mgmt is told about the device;
	 * otherwise notify mgmt now (exactly once per connection).
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* No outgoing authentication required: setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3234 
/* Handle HCI Command Complete event.
 *
 * Extracts the completed opcode and its status byte, dispatches to the
 * per-command hci_cc_* handler, re-arms the command queue according to
 * the controller's reported command credits (ev->ncmd) and resolves any
 * pending hci_request.  @opcode and @status are output parameters for
 * the caller; @req_complete/@req_complete_skb receive the request
 * completion callbacks to invoke after the event is processed.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the command status */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed: stop the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-arm command credits unless a reset is in flight */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Don't kick the command queue while a command is still marked
	 * pending; log the unexpected event instead.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3607 
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the per-command hci_cs_* handler, re-arms the
 * command queue from the controller's reported credits and, when
 * appropriate, completes the pending hci_request.  @opcode and @status
 * are output parameters for the caller; @req_complete/@req_complete_skb
 * receive the request completion callbacks to invoke afterwards.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command was acknowledged: stop the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-arm command credits unless a reset is in flight */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Don't kick the command queue while a command is still marked
	 * pending; log the unexpected event instead.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3720 
hci_hardware_error_evt(struct hci_dev * hdev,struct sk_buff * skb)3721 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3722 {
3723 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3724 
3725 	hdev->hw_error_code = ev->code;
3726 
3727 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3728 }
3729 
hci_role_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3730 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3731 {
3732 	struct hci_ev_role_change *ev = (void *) skb->data;
3733 	struct hci_conn *conn;
3734 
3735 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3736 
3737 	hci_dev_lock(hdev);
3738 
3739 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3740 	if (conn) {
3741 		if (!ev->status)
3742 			conn->role = ev->role;
3743 
3744 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3745 
3746 		hci_role_switch_cfm(conn, ev->status, ev->role);
3747 	}
3748 
3749 	hci_dev_unlock(hdev);
3750 }
3751 
/* Handle HCI Number of Completed Packets event (packet-based flow
 * control): credit back completed packets per connection, clamp the
 * device-wide counters to the controller-reported maxima, and kick the
 * TX work to resume sending.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the payload really holds num_hndl entries */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE uses its own buffer pool when the controller
			 * reports one (le_pkts != 0), otherwise it shares
			 * the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3818 
/* Resolve an event handle to a connection.  On a primary controller the
 * handle identifies a link directly; on an AMP controller it identifies
 * a logical channel whose parent connection is returned.  Returns NULL
 * when nothing matches or the device type is unknown.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	if (hdev->dev_type == HCI_PRIMARY)
		return hci_conn_hash_lookup_handle(hdev, handle);

	if (hdev->dev_type == HCI_AMP) {
		chan = hci_chan_lookup_handle(hdev, handle);
		return chan ? chan->conn : NULL;
	}

	bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
	return NULL;
}
3839 
/* Handle HCI Number of Completed Data Blocks event (block-based flow
 * control): credit back completed blocks per connection, clamp the
 * shared block counter and kick the TX work to resume sending.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the payload really holds num_hndl entries */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* ACL and AMP links share one block pool; never
			 * exceed the controller-reported total.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3890 
hci_mode_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3891 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3892 {
3893 	struct hci_ev_mode_change *ev = (void *) skb->data;
3894 	struct hci_conn *conn;
3895 
3896 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3897 
3898 	hci_dev_lock(hdev);
3899 
3900 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3901 	if (conn) {
3902 		conn->mode = ev->mode;
3903 
3904 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3905 					&conn->flags)) {
3906 			if (conn->mode == HCI_CM_ACTIVE)
3907 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3908 			else
3909 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3910 		}
3911 
3912 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3913 			hci_sco_setup(conn, ev->status);
3914 	}
3915 
3916 	hci_dev_unlock(hdev);
3917 }
3918 
/* Handle HCI PIN Code Request event: reject pairing when the device is
 * not bondable and we did not initiate authentication, otherwise relay
 * the request to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Pairing in progress: stretch the idle disconnect timeout.  The
	 * hold/drop pair only updates the timer; the refcount is unchanged.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Ask user space for a secure PIN when high security is
		 * pending on this connection.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3956 
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type.  Changed-combination
 * keys are ignored here (they are fixed up by the key store); unit and
 * debug keys keep the current pending security level.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	if (key_type == HCI_LK_LOCAL_UNIT ||
	    key_type == HCI_LK_REMOTE_UNIT ||
	    key_type == HCI_LK_DEBUG_COMBINATION)
		return;

	if (key_type == HCI_LK_COMBINATION)
		conn->pending_sec_level = (pin_len == 16) ?
					  BT_SECURITY_HIGH : BT_SECURITY_MEDIUM;
	else if (key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		 key_type == HCI_LK_UNAUTH_COMBINATION_P256)
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
	else if (key_type == HCI_LK_AUTH_COMBINATION_P192)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else if (key_type == HCI_LK_AUTH_COMBINATION_P256)
		conn->pending_sec_level = BT_SECURITY_FIPS;
}
3988 
/* Handle HCI Link Key Request event: look up a stored link key for the
 * peer and reply with it, or send a negative reply when no acceptable
 * key is known (unknown address, unauthenticated key where MITM was
 * requested, or a short-PIN combination key for high/FIPS security).
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only consulted when mgmt is in control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when the connection's
		 * auth_type requests MITM protection (bit 0 set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a PIN shorter than 16 digits is
		 * too weak for the high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4048 
/* Handle HCI Link Key Notification event: record the new link key on
 * the connection, hand it to the key store and mgmt, and decide whether
 * it should persist beyond this connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Pairing just produced a key: extend the idle disconnect timeout.
	 * The hold/drop pair only updates the timer.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Key storage is only handled when mgmt is in control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4108 
hci_clock_offset_evt(struct hci_dev * hdev,struct sk_buff * skb)4109 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4110 {
4111 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4112 	struct hci_conn *conn;
4113 
4114 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4115 
4116 	hci_dev_lock(hdev);
4117 
4118 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4119 	if (conn && !ev->status) {
4120 		struct inquiry_entry *ie;
4121 
4122 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4123 		if (ie) {
4124 			ie->data.clock_offset = ev->clock_offset;
4125 			ie->timestamp = jiffies;
4126 		}
4127 	}
4128 
4129 	hci_dev_unlock(hdev);
4130 }
4131 
hci_pkt_type_change_evt(struct hci_dev * hdev,struct sk_buff * skb)4132 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4133 {
4134 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4135 	struct hci_conn *conn;
4136 
4137 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4138 
4139 	hci_dev_lock(hdev);
4140 
4141 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4142 	if (conn && !ev->status)
4143 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4144 
4145 	hci_dev_unlock(hdev);
4146 }
4147 
hci_pscan_rep_mode_evt(struct hci_dev * hdev,struct sk_buff * skb)4148 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4149 {
4150 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4151 	struct inquiry_entry *ie;
4152 
4153 	BT_DBG("%s", hdev->name);
4154 
4155 	hci_dev_lock(hdev);
4156 
4157 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4158 	if (ie) {
4159 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4160 		ie->timestamp = jiffies;
4161 	}
4162 
4163 	hci_dev_unlock(hdev);
4164 }
4165 
/* HCI Inquiry Result with RSSI event.
 *
 * The event carries num_rsp responses in one of two wire formats:
 * plain inquiry_info_with_rssi, or a variant that additionally has a
 * pscan_mode byte. The format is inferred from the per-response
 * payload size ((skb->len - 1) / num_rsp). Each response is merged
 * into the inquiry cache and forwarded to the management interface
 * as a device-found event.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Nothing to do for an empty result; also avoids a division by
	 * zero in the format check below.
	 */
	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported to user space */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		/* Reject truncated events before touching the payload */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		/* Reject truncated events before touching the payload */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			/* This wire format carries no pscan_mode field */
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
4236 
/* HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection. For page 1 (the
 * remote host feature page) the SSP and Secure Connections state of
 * the link is updated. If the connection is still in BT_CONFIG, the
 * setup is continued with a remote name request or by declaring the
 * connection to the management interface.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Only cache pages we have storage for */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Keep the cached SSP mode of the peer in sync */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Everything below only applies while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* Without a pending outgoing authentication the setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4300 
hci_sync_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4301 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4302 				       struct sk_buff *skb)
4303 {
4304 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4305 	struct hci_conn *conn;
4306 
4307 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4308 
4309 	hci_dev_lock(hdev);
4310 
4311 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4312 	if (!conn) {
4313 		if (ev->link_type == ESCO_LINK)
4314 			goto unlock;
4315 
4316 		/* When the link type in the event indicates SCO connection
4317 		 * and lookup of the connection object fails, then check
4318 		 * if an eSCO connection object exists.
4319 		 *
4320 		 * The core limits the synchronous connections to either
4321 		 * SCO or eSCO. The eSCO connection is preferred and tried
4322 		 * to be setup first and until successfully established,
4323 		 * the link type will be hinted as eSCO.
4324 		 */
4325 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4326 		if (!conn)
4327 			goto unlock;
4328 	}
4329 
4330 	switch (ev->status) {
4331 	case 0x00:
4332 		/* The synchronous connection complete event should only be
4333 		 * sent once per new connection. Receiving a successful
4334 		 * complete event when the connection status is already
4335 		 * BT_CONNECTED means that the device is misbehaving and sent
4336 		 * multiple complete event packets for the same new connection.
4337 		 *
4338 		 * Registering the device more than once can corrupt kernel
4339 		 * memory, hence upon detecting this invalid event, we report
4340 		 * an error and ignore the packet.
4341 		 */
4342 		if (conn->state == BT_CONNECTED) {
4343 			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4344 			goto unlock;
4345 		}
4346 
4347 		conn->handle = __le16_to_cpu(ev->handle);
4348 		conn->state  = BT_CONNECTED;
4349 		conn->type   = ev->link_type;
4350 
4351 		hci_debugfs_create_conn(conn);
4352 		hci_conn_add_sysfs(conn);
4353 		break;
4354 
4355 	case 0x10:	/* Connection Accept Timeout */
4356 	case 0x0d:	/* Connection Rejected due to Limited Resources */
4357 	case 0x11:	/* Unsupported Feature or Parameter Value */
4358 	case 0x1c:	/* SCO interval rejected */
4359 	case 0x1a:	/* Unsupported Remote Feature */
4360 	case 0x1e:	/* Invalid LMP Parameters */
4361 	case 0x1f:	/* Unspecified error */
4362 	case 0x20:	/* Unsupported LMP Parameter value */
4363 		if (conn->out) {
4364 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4365 					(hdev->esco_type & EDR_ESCO_MASK);
4366 			if (hci_setup_sync(conn, conn->link->handle))
4367 				goto unlock;
4368 		}
4369 		fallthrough;
4370 
4371 	default:
4372 		conn->state = BT_CLOSED;
4373 		break;
4374 	}
4375 
4376 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4377 
4378 	switch (ev->air_mode) {
4379 	case 0x02:
4380 		if (hdev->notify)
4381 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4382 		break;
4383 	case 0x03:
4384 		if (hdev->notify)
4385 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4386 		break;
4387 	}
4388 
4389 	hci_connect_cfm(conn, ev->status);
4390 	if (ev->status)
4391 		hci_conn_del(conn);
4392 
4393 unlock:
4394 	hci_dev_unlock(hdev);
4395 }
4396 
/* Return the number of bytes of valid EIR data in the buffer.
 *
 * EIR data is a sequence of length-prefixed fields; a zero length byte
 * terminates the sequence early. If no terminator is found within
 * eir_len bytes, the whole buffer length is reported.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[offset];

		/* A zero-length field marks the end of significant data */
		if (!field_len)
			return offset;

		/* Skip the length byte plus the field payload */
		offset += field_len + 1;
	}

	return eir_len;
}
4413 
/* HCI Extended Inquiry Result event.
 *
 * Each response carries class-of-device, RSSI and an EIR data blob.
 * Responses are merged into the inquiry cache and forwarded to the
 * management interface together with their EIR payload.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Ignore empty and truncated events */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	/* Results of a periodic inquiry are not reported to user space */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* Extended inquiry results imply SSP support */
		data.ssp_mode		= 0x01;

		/* With mgmt active, a name request is only needed when the
		 * EIR data does not already carry the complete name.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
4463 
/* HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links; for BR/EDR the equivalent processing
 * happens in the auth_complete handler. Updates the security level,
 * tears the link down on failure, and completes connection setup when
 * the link was still in BT_CONFIG.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* Key refresh succeeded: the pending security level is now active */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* On failure of an established link, disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup finishes with this event */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold the connection and arm the disconnect timeout;
		 * the hold/drop pair resets the reference bookkeeping.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4513 
hci_get_auth_req(struct hci_conn * conn)4514 static u8 hci_get_auth_req(struct hci_conn *conn)
4515 {
4516 	/* If remote requests no-bonding follow that lead */
4517 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4518 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4519 		return conn->remote_auth | (conn->auth_type & 0x01);
4520 
4521 	/* If both remote and local have enough IO capabilities, require
4522 	 * MITM protection
4523 	 */
4524 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4525 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4526 		return conn->remote_auth | 0x01;
4527 
4528 	/* No MITM protection possible so ignore remote requirement */
4529 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4530 }
4531 
bredr_oob_data_present(struct hci_conn * conn)4532 static u8 bredr_oob_data_present(struct hci_conn *conn)
4533 {
4534 	struct hci_dev *hdev = conn->hdev;
4535 	struct oob_data *data;
4536 
4537 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4538 	if (!data)
4539 		return 0x00;
4540 
4541 	if (bredr_sc_enabled(hdev)) {
4542 		/* When Secure Connections is enabled, then just
4543 		 * return the present value stored with the OOB
4544 		 * data. The stored value contains the right present
4545 		 * information. However it can only be trusted when
4546 		 * not in Secure Connection Only mode.
4547 		 */
4548 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4549 			return data->present;
4550 
4551 		/* When Secure Connections Only mode is enabled, then
4552 		 * the P-256 values are required. If they are not
4553 		 * available, then do not declare that OOB data is
4554 		 * present.
4555 		 */
4556 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4557 		    !memcmp(data->hash256, ZERO_KEY, 16))
4558 			return 0x00;
4559 
4560 		return 0x02;
4561 	}
4562 
4563 	/* When Secure Connections is not enabled or actually
4564 	 * not supported by the hardware, then check that if
4565 	 * P-192 data values are present.
4566 	 */
4567 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4568 	    !memcmp(data->hash192, ZERO_KEY, 16))
4569 		return 0x00;
4570 
4571 	return 0x01;
4572 }
4573 
/* HCI IO Capability Request event.
 *
 * Decide whether to proceed with Secure Simple Pairing by answering
 * with an IO capability reply (possibly adjusting the authentication
 * requirements) or rejecting the pairing with a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	/* Without the management interface, user space handles pairing */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing is not allowed: reject the request */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4642 
hci_io_capa_reply_evt(struct hci_dev * hdev,struct sk_buff * skb)4643 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4644 {
4645 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4646 	struct hci_conn *conn;
4647 
4648 	BT_DBG("%s", hdev->name);
4649 
4650 	hci_dev_lock(hdev);
4651 
4652 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4653 	if (!conn)
4654 		goto unlock;
4655 
4656 	conn->remote_cap = ev->capability;
4657 	conn->remote_auth = ev->authentication;
4658 
4659 unlock:
4660 	hci_dev_unlock(hdev);
4661 }
4662 
/* HCI User Confirmation Request event.
 *
 * Implements the numeric-comparison decision: reject when MITM is
 * required but impossible, auto-accept (optionally after a delay) when
 * neither side needs MITM, and otherwise hand the decision to user
 * space via the management interface.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without the management interface, user space handles pairing */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM demand */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a worker */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4747 
hci_user_passkey_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4748 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4749 					 struct sk_buff *skb)
4750 {
4751 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4752 
4753 	BT_DBG("%s", hdev->name);
4754 
4755 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4756 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4757 }
4758 
hci_user_passkey_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4759 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4760 					struct sk_buff *skb)
4761 {
4762 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4763 	struct hci_conn *conn;
4764 
4765 	BT_DBG("%s", hdev->name);
4766 
4767 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4768 	if (!conn)
4769 		return;
4770 
4771 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4772 	conn->passkey_entered = 0;
4773 
4774 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4775 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4776 					 conn->dst_type, conn->passkey_notify,
4777 					 conn->passkey_entered);
4778 }
4779 
hci_keypress_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4780 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4781 {
4782 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4783 	struct hci_conn *conn;
4784 
4785 	BT_DBG("%s", hdev->name);
4786 
4787 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4788 	if (!conn)
4789 		return;
4790 
4791 	switch (ev->type) {
4792 	case HCI_KEYPRESS_STARTED:
4793 		conn->passkey_entered = 0;
4794 		return;
4795 
4796 	case HCI_KEYPRESS_ENTERED:
4797 		conn->passkey_entered++;
4798 		break;
4799 
4800 	case HCI_KEYPRESS_ERASED:
4801 		conn->passkey_entered--;
4802 		break;
4803 
4804 	case HCI_KEYPRESS_CLEARED:
4805 		conn->passkey_entered = 0;
4806 		break;
4807 
4808 	case HCI_KEYPRESS_COMPLETED:
4809 		return;
4810 	}
4811 
4812 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4813 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4814 					 conn->dst_type, conn->passkey_notify,
4815 					 conn->passkey_entered);
4816 }
4817 
hci_simple_pair_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4818 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4819 					 struct sk_buff *skb)
4820 {
4821 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4822 	struct hci_conn *conn;
4823 
4824 	BT_DBG("%s", hdev->name);
4825 
4826 	hci_dev_lock(hdev);
4827 
4828 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4829 	if (!conn)
4830 		goto unlock;
4831 
4832 	/* Reset the authentication requirement to unknown */
4833 	conn->remote_auth = 0xff;
4834 
4835 	/* To avoid duplicate auth_failed events to user space we check
4836 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4837 	 * initiated the authentication. A traditional auth_complete
4838 	 * event gets always produced as initiator and is also mapped to
4839 	 * the mgmt_auth_failed event */
4840 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4841 		mgmt_auth_failed(conn, ev->status);
4842 
4843 	hci_conn_drop(conn);
4844 
4845 unlock:
4846 	hci_dev_unlock(hdev);
4847 }
4848 
hci_remote_host_features_evt(struct hci_dev * hdev,struct sk_buff * skb)4849 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4850 					 struct sk_buff *skb)
4851 {
4852 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4853 	struct inquiry_entry *ie;
4854 	struct hci_conn *conn;
4855 
4856 	BT_DBG("%s", hdev->name);
4857 
4858 	hci_dev_lock(hdev);
4859 
4860 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4861 	if (conn)
4862 		memcpy(conn->features[1], ev->features, 8);
4863 
4864 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4865 	if (ie)
4866 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4867 
4868 	hci_dev_unlock(hdev);
4869 }
4870 
/* HCI Remote OOB Data Request event.
 *
 * Answer with the stored remote OOB data: extended (P-192 + P-256)
 * when Secure Connections is enabled, legacy (P-192 only) otherwise,
 * or a negative reply when no data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without the management interface, user space handles pairing */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the P-192 values must not be used */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4924 
4925 #if IS_ENABLED(CONFIG_BT_HS)
hci_chan_selected_evt(struct hci_dev * hdev,struct sk_buff * skb)4926 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4927 {
4928 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4929 	struct hci_conn *hcon;
4930 
4931 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4932 
4933 	skb_pull(skb, sizeof(*ev));
4934 
4935 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4936 	if (!hcon)
4937 		return;
4938 
4939 	amp_read_loc_assoc_final_data(hdev, hcon);
4940 }
4941 
hci_phy_link_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4942 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4943 				      struct sk_buff *skb)
4944 {
4945 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4946 	struct hci_conn *hcon, *bredr_hcon;
4947 
4948 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4949 	       ev->status);
4950 
4951 	hci_dev_lock(hdev);
4952 
4953 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4954 	if (!hcon) {
4955 		hci_dev_unlock(hdev);
4956 		return;
4957 	}
4958 
4959 	if (!hcon->amp_mgr) {
4960 		hci_dev_unlock(hdev);
4961 		return;
4962 	}
4963 
4964 	if (ev->status) {
4965 		hci_conn_del(hcon);
4966 		hci_dev_unlock(hdev);
4967 		return;
4968 	}
4969 
4970 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4971 
4972 	hcon->state = BT_CONNECTED;
4973 	bacpy(&hcon->dst, &bredr_hcon->dst);
4974 
4975 	hci_conn_hold(hcon);
4976 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4977 	hci_conn_drop(hcon);
4978 
4979 	hci_debugfs_create_conn(hcon);
4980 	hci_conn_add_sysfs(hcon);
4981 
4982 	amp_physical_cfm(bredr_hcon, hcon);
4983 
4984 	hci_dev_unlock(hdev);
4985 }
4986 
/* AMP Logical Link Complete event.
 *
 * Creates the AMP hci_chan for the new logical link and, when a
 * BR/EDR L2CAP channel is waiting on it, confirms the logical link to
 * L2CAP.
 *
 * NOTE(review): unlike most handlers in this file, this one runs
 * without taking hci_dev_lock — presumably safe given how hci_chan
 * and amp_mgr are used, but worth confirming against the locking
 * rules of hci_conn/hci_chan.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The AMP controller's block MTU bounds the L2CAP MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5025 
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5026 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5027 					     struct sk_buff *skb)
5028 {
5029 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5030 	struct hci_chan *hchan;
5031 
5032 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5033 	       le16_to_cpu(ev->handle), ev->status);
5034 
5035 	if (ev->status)
5036 		return;
5037 
5038 	hci_dev_lock(hdev);
5039 
5040 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5041 	if (!hchan || !hchan->amp)
5042 		goto unlock;
5043 
5044 	amp_destroy_logical_link(hchan, ev->reason);
5045 
5046 unlock:
5047 	hci_dev_unlock(hdev);
5048 }
5049 
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5050 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5051 					     struct sk_buff *skb)
5052 {
5053 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5054 	struct hci_conn *hcon;
5055 
5056 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5057 
5058 	if (ev->status)
5059 		return;
5060 
5061 	hci_dev_lock(hdev);
5062 
5063 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5064 	if (hcon) {
5065 		hcon->state = BT_CLOSED;
5066 		hci_conn_del(hcon);
5067 	}
5068 
5069 	hci_dev_unlock(hdev);
5070 }
5071 #endif
5072 
/* Update the stored initiator/responder addresses of an LE connection
 * from the addresses reported by the controller in a connection
 * complete event. For an outgoing connection the peer is the
 * responder; for an incoming one we are, and default connection
 * interval bounds are also recorded for later parameter validation.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5125 
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Creates or completes the hci_conn object, resolves the peer
 * identity address, notifies mgmt and starts the remote feature
 * exchange where applicable.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		/* No locally initiated connection attempt in progress, so
		 * allocate a fresh hci_conn for this link.
		 */
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The pending attempt completed; stop its timeout timer */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify userspace only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection while the feature exchange runs;
		 * the feature-complete handler drops this reference.
		 */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection action for this peer has now fired;
	 * remove it and release the reference it held on the conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5264 
hci_le_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5265 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5266 {
5267 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5268 
5269 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5270 
5271 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5272 			     NULL, ev->role, le16_to_cpu(ev->handle),
5273 			     le16_to_cpu(ev->interval),
5274 			     le16_to_cpu(ev->latency),
5275 			     le16_to_cpu(ev->supervision_timeout));
5276 }
5277 
hci_le_enh_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5278 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5279 					 struct sk_buff *skb)
5280 {
5281 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5282 
5283 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5284 
5285 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5286 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5287 			     le16_to_cpu(ev->interval),
5288 			     le16_to_cpu(ev->latency),
5289 			     le16_to_cpu(ev->supervision_timeout));
5290 
5291 	if (use_ll_privacy(hdev) &&
5292 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5293 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5294 		hci_req_disable_address_resolution(hdev);
5295 }
5296 
/* LE Advertising Set Terminated event. On error the advertising
 * instance is removed; on success the responder address of the
 * resulting connection is filled in when advertising used a random
 * address (the ext-adv counterpart of the fixup done for legacy
 * advertising in le_conn_update_addr()).
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status) {
		struct adv_info *adv;

		adv = hci_find_adv_instance(hdev, ev->handle);
		if (!adv)
			return;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		return;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		/* Only fill in resp_addr if advertising with a random
		 * address and it has not already been set.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			return;

		/* Instance 0 uses the controller-wide random address */
		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
5336 
hci_le_conn_update_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5337 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5338 					    struct sk_buff *skb)
5339 {
5340 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5341 	struct hci_conn *conn;
5342 
5343 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5344 
5345 	if (ev->status)
5346 		return;
5347 
5348 	hci_dev_lock(hdev);
5349 
5350 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5351 	if (conn) {
5352 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5353 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5354 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5355 	}
5356 
5357 	hci_dev_unlock(hdev);
5358 }
5359 
5360 /* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* For explicit connects any connectable advertising is accepted;
	 * otherwise the stored auto_connect policy decides.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5450 
/* Process a single advertising report: validate the type and length,
 * resolve the advertiser's identity, possibly trigger a pending LE
 * connection, and emit (or cache for merging with a scan response)
 * the mgmt Device Found event.
 *
 * direct_addr is non-NULL only for LE Direct Advertising Reports;
 * ext_adv indicates the report came via the extended advertising
 * event (which permits data longer than HCI_MAX_AD_LENGTH and is
 * never cached for merging).
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr;

	/* Only the known legacy advertising PDU types are processed */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5652 
hci_le_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5653 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5654 {
5655 	u8 num_reports = skb->data[0];
5656 	void *ptr = &skb->data[1];
5657 
5658 	hci_dev_lock(hdev);
5659 
5660 	while (num_reports--) {
5661 		struct hci_ev_le_advertising_info *ev = ptr;
5662 		s8 rssi;
5663 
5664 		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5665 			bt_dev_err(hdev, "Malicious advertising data.");
5666 			break;
5667 		}
5668 
5669 		if (ev->length <= HCI_MAX_AD_LENGTH &&
5670 		    ev->data + ev->length <= skb_tail_pointer(skb)) {
5671 			rssi = ev->data[ev->length];
5672 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5673 					   ev->bdaddr_type, NULL, 0, rssi,
5674 					   ev->data, ev->length, false);
5675 		} else {
5676 			bt_dev_err(hdev, "Dropping invalid advertising data");
5677 		}
5678 
5679 		ptr += sizeof(*ev) + ev->length + 1;
5680 	}
5681 
5682 	hci_dev_unlock(hdev);
5683 }
5684 
ext_evt_type_to_legacy(struct hci_dev * hdev,u16 evt_type)5685 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5686 {
5687 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5688 		switch (evt_type) {
5689 		case LE_LEGACY_ADV_IND:
5690 			return LE_ADV_IND;
5691 		case LE_LEGACY_ADV_DIRECT_IND:
5692 			return LE_ADV_DIRECT_IND;
5693 		case LE_LEGACY_ADV_SCAN_IND:
5694 			return LE_ADV_SCAN_IND;
5695 		case LE_LEGACY_NONCONN_IND:
5696 			return LE_ADV_NONCONN_IND;
5697 		case LE_LEGACY_SCAN_RSP_ADV:
5698 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5699 			return LE_ADV_SCAN_RSP;
5700 		}
5701 
5702 		goto invalid;
5703 	}
5704 
5705 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5706 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5707 			return LE_ADV_DIRECT_IND;
5708 
5709 		return LE_ADV_IND;
5710 	}
5711 
5712 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5713 		return LE_ADV_SCAN_RSP;
5714 
5715 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5716 		return LE_ADV_SCAN_IND;
5717 
5718 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5719 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5720 		return LE_ADV_NONCONN_IND;
5721 
5722 invalid:
5723 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5724 			       evt_type);
5725 
5726 	return LE_ADV_INVALID;
5727 }
5728 
hci_le_ext_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5729 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5730 {
5731 	u8 num_reports = skb->data[0];
5732 	void *ptr = &skb->data[1];
5733 
5734 	hci_dev_lock(hdev);
5735 
5736 	while (num_reports--) {
5737 		struct hci_ev_le_ext_adv_report *ev = ptr;
5738 		u8 legacy_evt_type;
5739 		u16 evt_type;
5740 
5741 		evt_type = __le16_to_cpu(ev->evt_type);
5742 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5743 		if (legacy_evt_type != LE_ADV_INVALID) {
5744 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5745 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5746 					   ev->data, ev->length,
5747 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5748 		}
5749 
5750 		ptr += sizeof(*ev) + ev->length;
5751 	}
5752 
5753 	hci_dev_unlock(hdev);
5754 }
5755 
hci_le_remote_feat_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5756 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5757 					    struct sk_buff *skb)
5758 {
5759 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5760 	struct hci_conn *conn;
5761 
5762 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5763 
5764 	hci_dev_lock(hdev);
5765 
5766 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5767 	if (conn) {
5768 		if (!ev->status)
5769 			memcpy(conn->features[0], ev->features, 8);
5770 
5771 		if (conn->state == BT_CONFIG) {
5772 			__u8 status;
5773 
5774 			/* If the local controller supports slave-initiated
5775 			 * features exchange, but the remote controller does
5776 			 * not, then it is possible that the error code 0x1a
5777 			 * for unsupported remote feature gets returned.
5778 			 *
5779 			 * In this specific case, allow the connection to
5780 			 * transition into connected state and mark it as
5781 			 * successful.
5782 			 */
5783 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5784 			    !conn->out && ev->status == 0x1a)
5785 				status = 0x00;
5786 			else
5787 				status = ev->status;
5788 
5789 			conn->state = BT_CONNECTED;
5790 			hci_connect_cfm(conn, status);
5791 			hci_conn_drop(conn);
5792 		}
5793 	}
5794 
5795 	hci_dev_unlock(hdev);
5796 }
5797 
/* LE Long Term Key Request event: look up a matching LTK for the
 * connection and reply with it, or send a negative reply when no
 * connection or key is found, or the EDiv/Rand values do not match.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the rest of the reply field in case
	 * the stored key is shorter than 16 bytes.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key once consumed */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5861 
/* Reject a remote connection parameter request with the given reason */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
5873 
hci_le_remote_conn_param_req_evt(struct hci_dev * hdev,struct sk_buff * skb)5874 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5875 					     struct sk_buff *skb)
5876 {
5877 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5878 	struct hci_cp_le_conn_param_req_reply cp;
5879 	struct hci_conn *hcon;
5880 	u16 handle, min, max, latency, timeout;
5881 
5882 	handle = le16_to_cpu(ev->handle);
5883 	min = le16_to_cpu(ev->interval_min);
5884 	max = le16_to_cpu(ev->interval_max);
5885 	latency = le16_to_cpu(ev->latency);
5886 	timeout = le16_to_cpu(ev->timeout);
5887 
5888 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5889 	if (!hcon || hcon->state != BT_CONNECTED)
5890 		return send_conn_param_neg_reply(hdev, handle,
5891 						 HCI_ERROR_UNKNOWN_CONN_ID);
5892 
5893 	if (hci_check_conn_params(min, max, latency, timeout))
5894 		return send_conn_param_neg_reply(hdev, handle,
5895 						 HCI_ERROR_INVALID_LL_PARAMS);
5896 
5897 	if (hcon->role == HCI_ROLE_MASTER) {
5898 		struct hci_conn_params *params;
5899 		u8 store_hint;
5900 
5901 		hci_dev_lock(hdev);
5902 
5903 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5904 						hcon->dst_type);
5905 		if (params) {
5906 			params->conn_min_interval = min;
5907 			params->conn_max_interval = max;
5908 			params->conn_latency = latency;
5909 			params->supervision_timeout = timeout;
5910 			store_hint = 0x01;
5911 		} else{
5912 			store_hint = 0x00;
5913 		}
5914 
5915 		hci_dev_unlock(hdev);
5916 
5917 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5918 				    store_hint, min, max, latency, timeout);
5919 	}
5920 
5921 	cp.handle = ev->handle;
5922 	cp.interval_min = ev->interval_min;
5923 	cp.interval_max = ev->interval_max;
5924 	cp.latency = ev->latency;
5925 	cp.timeout = ev->timeout;
5926 	cp.min_ce_len = 0;
5927 	cp.max_ce_len = 0;
5928 
5929 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5930 }
5931 
hci_le_direct_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5932 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5933 					 struct sk_buff *skb)
5934 {
5935 	u8 num_reports = skb->data[0];
5936 	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5937 
5938 	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5939 		return;
5940 
5941 	hci_dev_lock(hdev);
5942 
5943 	for (; num_reports; num_reports--, ev++)
5944 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5945 				   ev->bdaddr_type, &ev->direct_addr,
5946 				   ev->direct_addr_type, ev->rssi, NULL, 0,
5947 				   false);
5948 
5949 	hci_dev_unlock(hdev);
5950 }
5951 
hci_le_phy_update_evt(struct hci_dev * hdev,struct sk_buff * skb)5952 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5953 {
5954 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5955 	struct hci_conn *conn;
5956 
5957 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5958 
5959 	if (ev->status)
5960 		return;
5961 
5962 	hci_dev_lock(hdev);
5963 
5964 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5965 	if (!conn)
5966 		goto unlock;
5967 
5968 	conn->le_tx_phy = ev->tx_phy;
5969 	conn->le_rx_phy = ev->rx_phy;
5970 
5971 unlock:
5972 	hci_dev_unlock(hdev);
5973 }
5974 
hci_le_meta_evt(struct hci_dev * hdev,struct sk_buff * skb)5975 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5976 {
5977 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5978 
5979 	skb_pull(skb, sizeof(*le_ev));
5980 
5981 	switch (le_ev->subevent) {
5982 	case HCI_EV_LE_CONN_COMPLETE:
5983 		hci_le_conn_complete_evt(hdev, skb);
5984 		break;
5985 
5986 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5987 		hci_le_conn_update_complete_evt(hdev, skb);
5988 		break;
5989 
5990 	case HCI_EV_LE_ADVERTISING_REPORT:
5991 		hci_le_adv_report_evt(hdev, skb);
5992 		break;
5993 
5994 	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5995 		hci_le_remote_feat_complete_evt(hdev, skb);
5996 		break;
5997 
5998 	case HCI_EV_LE_LTK_REQ:
5999 		hci_le_ltk_request_evt(hdev, skb);
6000 		break;
6001 
6002 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6003 		hci_le_remote_conn_param_req_evt(hdev, skb);
6004 		break;
6005 
6006 	case HCI_EV_LE_DIRECT_ADV_REPORT:
6007 		hci_le_direct_adv_report_evt(hdev, skb);
6008 		break;
6009 
6010 	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6011 		hci_le_phy_update_evt(hdev, skb);
6012 		break;
6013 
6014 	case HCI_EV_LE_EXT_ADV_REPORT:
6015 		hci_le_ext_adv_report_evt(hdev, skb);
6016 		break;
6017 
6018 	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6019 		hci_le_enh_conn_complete_evt(hdev, skb);
6020 		break;
6021 
6022 	case HCI_EV_LE_EXT_ADV_SET_TERM:
6023 		hci_le_ext_adv_term_evt(hdev, skb);
6024 		break;
6025 
6026 	default:
6027 		break;
6028 	}
6029 }
6030 
/* Check whether the given skb (the last received HCI event) completes
 * the request identified by opcode/event.
 *
 * Returns true on a match, false otherwise. On a match the event
 * header (and, for Command Complete, the cmd_complete header) has
 * been pulled from the skb so the caller only sees the return
 * parameters.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		/* Caller waits for a specific event instead of a
		 * Command Complete.
		 */
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
6082 
/* Record why the controller woke the host, if this is the first event
 * seen while suspended: the wake reason and the peer address (where the
 * event carries one) are stored on @hdev for mgmt to report later.
 */
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 *
	 * Note the event-specific struct overlays: a Connection Request
	 * event starts with the bdaddr, while in a Connection Complete
	 * event the bdaddr follows status and handle, so each event must
	 * be parsed with its own struct or the wrong bytes are copied.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
6151 
/* Top-level HCI event dispatcher: parse the event header of @skb,
 * complete any pending request that was waiting on this event, and hand
 * the (header-stripped) skb to the per-event handler.  Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Event code 0x00 is not defined by the spec; drop it. */
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the last sent command asked to be completed by this specific
	 * event (rather than Command Complete/Status), resolve the pending
	 * request now so its completion callback can run below.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	/* Dispatch to the per-event handler; skb->data now points at the
	 * event parameters.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	/* AMP / High Speed events, only compiled in with CONFIG_BT_HS. */
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Run any request completion resolved above or by the Command
	 * Complete/Status handlers.  The skb-taking variant gets the
	 * pristine clone only when it really is the matching completion
	 * event; otherwise it is called with NULL.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* req_complete_skb does not take ownership; free both skbs here. */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6389