• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 #include <linux/crypto.h>
29 #include <crypto/algapi.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "hci_request.h"
36 #include "hci_debugfs.h"
37 #include "a2mp.h"
38 #include "amp.h"
39 #include "smp.h"
40 #include "msft.h"
41 
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 
45 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 
47 /* Handle HCI Event packets */
48 
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * @new_status: out parameter; receives the (possibly rewritten) status
 *              that the caller should propagate further.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	/* Clear the flag before waking waiters; the barrier pairs with
	 * the implicit ordering expected by wake_up_bit()/wait_on_bit().
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
88 
hci_cc_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)89 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
90 {
91 	__u8 status = *((__u8 *) skb->data);
92 
93 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
94 
95 	if (status)
96 		return;
97 
98 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
99 }
100 
hci_cc_exit_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)101 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 	__u8 status = *((__u8 *) skb->data);
104 
105 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
106 
107 	if (status)
108 		return;
109 
110 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
111 
112 	hci_conn_check_pending(hdev);
113 }
114 
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Intentionally a no-op beyond the trace: no state is updated here —
 * presumably the outcome is dealt with elsewhere (e.g. via the remote
 * name request flow); confirm against the event dispatch table.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
120 
hci_cc_role_discovery(struct hci_dev * hdev,struct sk_buff * skb)121 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
122 {
123 	struct hci_rp_role_discovery *rp = (void *) skb->data;
124 	struct hci_conn *conn;
125 
126 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 
128 	if (rp->status)
129 		return;
130 
131 	hci_dev_lock(hdev);
132 
133 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 	if (conn)
135 		conn->role = rp->role;
136 
137 	hci_dev_unlock(hdev);
138 }
139 
hci_cc_read_link_policy(struct hci_dev * hdev,struct sk_buff * skb)140 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 {
142 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
143 	struct hci_conn *conn;
144 
145 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
146 
147 	if (rp->status)
148 		return;
149 
150 	hci_dev_lock(hdev);
151 
152 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 	if (conn)
154 		conn->link_policy = __le16_to_cpu(rp->policy);
155 
156 	hci_dev_unlock(hdev);
157 }
158 
hci_cc_write_link_policy(struct hci_dev * hdev,struct sk_buff * skb)159 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
162 	struct hci_conn *conn;
163 	void *sent;
164 
165 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
166 
167 	if (rp->status)
168 		return;
169 
170 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 	if (!sent)
172 		return;
173 
174 	hci_dev_lock(hdev);
175 
176 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
177 	if (conn)
178 		conn->link_policy = get_unaligned_le16(sent + 2);
179 
180 	hci_dev_unlock(hdev);
181 }
182 
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)183 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 					struct sk_buff *skb)
185 {
186 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
187 
188 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
189 
190 	if (rp->status)
191 		return;
192 
193 	hdev->link_policy = __le16_to_cpu(rp->policy);
194 }
195 
hci_cc_write_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)196 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 					 struct sk_buff *skb)
198 {
199 	__u8 status = *((__u8 *) skb->data);
200 	void *sent;
201 
202 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
203 
204 	if (status)
205 		return;
206 
207 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
208 	if (!sent)
209 		return;
210 
211 	hdev->link_policy = get_unaligned_le16(sent);
212 }
213 
/* Command Complete handler for HCI_Reset: drop all volatile host state
 * so the stack's view matches the freshly reset controller.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset attempt is finished either way; allow a new one. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* Cached TX power readings are stale after a controller reset. */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Forget cached advertising and scan-response payloads. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Mirror the controller's now-empty LE accept/resolving lists. */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
246 
hci_cc_read_stored_link_key(struct hci_dev * hdev,struct sk_buff * skb)247 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 					struct sk_buff *skb)
249 {
250 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
251 	struct hci_cp_read_stored_link_key *sent;
252 
253 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
254 
255 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
256 	if (!sent)
257 		return;
258 
259 	if (!rp->status && sent->read_all == 0x01) {
260 		hdev->stored_max_keys = rp->max_keys;
261 		hdev->stored_num_keys = rp->num_keys;
262 	}
263 }
264 
hci_cc_delete_stored_link_key(struct hci_dev * hdev,struct sk_buff * skb)265 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 					  struct sk_buff *skb)
267 {
268 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
269 
270 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
271 
272 	if (rp->status)
273 		return;
274 
275 	if (rp->num_keys <= hdev->stored_num_keys)
276 		hdev->stored_num_keys -= rp->num_keys;
277 	else
278 		hdev->stored_num_keys = 0;
279 }
280 
hci_cc_write_local_name(struct hci_dev * hdev,struct sk_buff * skb)281 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
282 {
283 	__u8 status = *((__u8 *) skb->data);
284 	void *sent;
285 
286 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
287 
288 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
289 	if (!sent)
290 		return;
291 
292 	hci_dev_lock(hdev);
293 
294 	if (hci_dev_test_flag(hdev, HCI_MGMT))
295 		mgmt_set_local_name_complete(hdev, sent, status);
296 	else if (!status)
297 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
298 
299 	hci_dev_unlock(hdev);
300 }
301 
hci_cc_read_local_name(struct hci_dev * hdev,struct sk_buff * skb)302 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
303 {
304 	struct hci_rp_read_local_name *rp = (void *) skb->data;
305 
306 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
307 
308 	if (rp->status)
309 		return;
310 
311 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
312 	    hci_dev_test_flag(hdev, HCI_CONFIG))
313 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
314 }
315 
hci_cc_write_auth_enable(struct hci_dev * hdev,struct sk_buff * skb)316 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
317 {
318 	__u8 status = *((__u8 *) skb->data);
319 	void *sent;
320 
321 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
322 
323 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
324 	if (!sent)
325 		return;
326 
327 	hci_dev_lock(hdev);
328 
329 	if (!status) {
330 		__u8 param = *((__u8 *) sent);
331 
332 		if (param == AUTH_ENABLED)
333 			set_bit(HCI_AUTH, &hdev->flags);
334 		else
335 			clear_bit(HCI_AUTH, &hdev->flags);
336 	}
337 
338 	if (hci_dev_test_flag(hdev, HCI_MGMT))
339 		mgmt_auth_enable_complete(hdev, status);
340 
341 	hci_dev_unlock(hdev);
342 }
343 
hci_cc_write_encrypt_mode(struct hci_dev * hdev,struct sk_buff * skb)344 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
345 {
346 	__u8 status = *((__u8 *) skb->data);
347 	__u8 param;
348 	void *sent;
349 
350 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
351 
352 	if (status)
353 		return;
354 
355 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
356 	if (!sent)
357 		return;
358 
359 	param = *((__u8 *) sent);
360 
361 	if (param)
362 		set_bit(HCI_ENCRYPT, &hdev->flags);
363 	else
364 		clear_bit(HCI_ENCRYPT, &hdev->flags);
365 }
366 
hci_cc_write_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)367 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
368 {
369 	__u8 status = *((__u8 *) skb->data);
370 	__u8 param;
371 	void *sent;
372 
373 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
374 
375 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
376 	if (!sent)
377 		return;
378 
379 	param = *((__u8 *) sent);
380 
381 	hci_dev_lock(hdev);
382 
383 	if (status) {
384 		hdev->discov_timeout = 0;
385 		goto done;
386 	}
387 
388 	if (param & SCAN_INQUIRY)
389 		set_bit(HCI_ISCAN, &hdev->flags);
390 	else
391 		clear_bit(HCI_ISCAN, &hdev->flags);
392 
393 	if (param & SCAN_PAGE)
394 		set_bit(HCI_PSCAN, &hdev->flags);
395 	else
396 		clear_bit(HCI_PSCAN, &hdev->flags);
397 
398 done:
399 	hci_dev_unlock(hdev);
400 }
401 
hci_cc_set_event_filter(struct hci_dev * hdev,struct sk_buff * skb)402 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
403 {
404 	__u8 status = *((__u8 *)skb->data);
405 	struct hci_cp_set_event_filter *cp;
406 	void *sent;
407 
408 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
409 
410 	if (status)
411 		return;
412 
413 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
414 	if (!sent)
415 		return;
416 
417 	cp = (struct hci_cp_set_event_filter *)sent;
418 
419 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
420 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
421 	else
422 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
423 }
424 
hci_cc_read_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)425 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
426 {
427 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
428 
429 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
430 
431 	if (rp->status)
432 		return;
433 
434 	memcpy(hdev->dev_class, rp->dev_class, 3);
435 
436 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
437 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
438 }
439 
hci_cc_write_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)440 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
441 {
442 	__u8 status = *((__u8 *) skb->data);
443 	void *sent;
444 
445 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
446 
447 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
448 	if (!sent)
449 		return;
450 
451 	hci_dev_lock(hdev);
452 
453 	if (status == 0)
454 		memcpy(hdev->dev_class, sent, 3);
455 
456 	if (hci_dev_test_flag(hdev, HCI_MGMT))
457 		mgmt_set_class_of_dev_complete(hdev, sent, status);
458 
459 	hci_dev_unlock(hdev);
460 }
461 
hci_cc_read_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)462 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
463 {
464 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
465 	__u16 setting;
466 
467 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
468 
469 	if (rp->status)
470 		return;
471 
472 	setting = __le16_to_cpu(rp->voice_setting);
473 
474 	if (hdev->voice_setting == setting)
475 		return;
476 
477 	hdev->voice_setting = setting;
478 
479 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
480 
481 	if (hdev->notify)
482 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
483 }
484 
hci_cc_write_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)485 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
486 				       struct sk_buff *skb)
487 {
488 	__u8 status = *((__u8 *) skb->data);
489 	__u16 setting;
490 	void *sent;
491 
492 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 
494 	if (status)
495 		return;
496 
497 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
498 	if (!sent)
499 		return;
500 
501 	setting = get_unaligned_le16(sent);
502 
503 	if (hdev->voice_setting == setting)
504 		return;
505 
506 	hdev->voice_setting = setting;
507 
508 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
509 
510 	if (hdev->notify)
511 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
512 }
513 
hci_cc_read_num_supported_iac(struct hci_dev * hdev,struct sk_buff * skb)514 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
515 					  struct sk_buff *skb)
516 {
517 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
518 
519 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
520 
521 	if (rp->status)
522 		return;
523 
524 	hdev->num_iac = rp->num_iac;
525 
526 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
527 }
528 
hci_cc_write_ssp_mode(struct hci_dev * hdev,struct sk_buff * skb)529 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
530 {
531 	__u8 status = *((__u8 *) skb->data);
532 	struct hci_cp_write_ssp_mode *sent;
533 
534 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
535 
536 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
537 	if (!sent)
538 		return;
539 
540 	hci_dev_lock(hdev);
541 
542 	if (!status) {
543 		if (sent->mode)
544 			hdev->features[1][0] |= LMP_HOST_SSP;
545 		else
546 			hdev->features[1][0] &= ~LMP_HOST_SSP;
547 	}
548 
549 	if (hci_dev_test_flag(hdev, HCI_MGMT))
550 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
551 	else if (!status) {
552 		if (sent->mode)
553 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
554 		else
555 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
556 	}
557 
558 	hci_dev_unlock(hdev);
559 }
560 
hci_cc_write_sc_support(struct hci_dev * hdev,struct sk_buff * skb)561 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
562 {
563 	u8 status = *((u8 *) skb->data);
564 	struct hci_cp_write_sc_support *sent;
565 
566 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
567 
568 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
569 	if (!sent)
570 		return;
571 
572 	hci_dev_lock(hdev);
573 
574 	if (!status) {
575 		if (sent->support)
576 			hdev->features[1][0] |= LMP_HOST_SC;
577 		else
578 			hdev->features[1][0] &= ~LMP_HOST_SC;
579 	}
580 
581 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
582 		if (sent->support)
583 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
584 		else
585 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
586 	}
587 
588 	hci_dev_unlock(hdev);
589 }
590 
hci_cc_read_local_version(struct hci_dev * hdev,struct sk_buff * skb)591 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 {
593 	struct hci_rp_read_local_version *rp = (void *) skb->data;
594 
595 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 
597 	if (rp->status)
598 		return;
599 
600 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
601 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
602 		hdev->hci_ver = rp->hci_ver;
603 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
604 		hdev->lmp_ver = rp->lmp_ver;
605 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
606 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
607 	}
608 }
609 
hci_cc_read_local_commands(struct hci_dev * hdev,struct sk_buff * skb)610 static void hci_cc_read_local_commands(struct hci_dev *hdev,
611 				       struct sk_buff *skb)
612 {
613 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
614 
615 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
616 
617 	if (rp->status)
618 		return;
619 
620 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
621 	    hci_dev_test_flag(hdev, HCI_CONFIG))
622 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
623 }
624 
hci_cc_read_auth_payload_timeout(struct hci_dev * hdev,struct sk_buff * skb)625 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
626 					     struct sk_buff *skb)
627 {
628 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
629 	struct hci_conn *conn;
630 
631 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
632 
633 	if (rp->status)
634 		return;
635 
636 	hci_dev_lock(hdev);
637 
638 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
639 	if (conn)
640 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
641 
642 	hci_dev_unlock(hdev);
643 }
644 
hci_cc_write_auth_payload_timeout(struct hci_dev * hdev,struct sk_buff * skb)645 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
646 					      struct sk_buff *skb)
647 {
648 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
649 	struct hci_conn *conn;
650 	void *sent;
651 
652 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 
654 	if (rp->status)
655 		return;
656 
657 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
658 	if (!sent)
659 		return;
660 
661 	hci_dev_lock(hdev);
662 
663 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
664 	if (conn)
665 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
666 
667 	hci_dev_unlock(hdev);
668 }
669 
/* Command Complete handler for HCI_Read_Local_Supported_Features: cache
 * the feature mask and derive the usable ACL/SCO/eSCO packet types.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Page 0 of the feature mask is 8 octets. */
	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO HV packets. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types, gated on eSCO capability. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO variants. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
719 
hci_cc_read_local_ext_features(struct hci_dev * hdev,struct sk_buff * skb)720 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
721 					   struct sk_buff *skb)
722 {
723 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
724 
725 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
726 
727 	if (rp->status)
728 		return;
729 
730 	if (hdev->max_page < rp->max_page)
731 		hdev->max_page = rp->max_page;
732 
733 	if (rp->page < HCI_MAX_PAGES)
734 		memcpy(hdev->features[rp->page], rp->features, 8);
735 }
736 
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)737 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
738 					  struct sk_buff *skb)
739 {
740 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
741 
742 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 
744 	if (rp->status)
745 		return;
746 
747 	hdev->flow_ctl_mode = rp->mode;
748 }
749 
hci_cc_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)750 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
751 {
752 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
760 	hdev->sco_mtu  = rp->sco_mtu;
761 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
762 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
763 
764 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
765 		hdev->sco_mtu  = 64;
766 		hdev->sco_pkts = 8;
767 	}
768 
769 	hdev->acl_cnt = hdev->acl_pkts;
770 	hdev->sco_cnt = hdev->sco_pkts;
771 
772 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
773 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
774 }
775 
/* Command Complete handler for HCI_Read_BD_ADDR: cache the controller's
 * public address. Both conditions below can be true at once, so they
 * are checked independently rather than as if/else.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The main address is only overwritten while initialising. */
	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	/* During setup keep a separate copy of the original address. */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
791 
hci_cc_read_local_pairing_opts(struct hci_dev * hdev,struct sk_buff * skb)792 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
793 					   struct sk_buff *skb)
794 {
795 	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
796 
797 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 
799 	if (rp->status)
800 		return;
801 
802 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
803 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
804 		hdev->pairing_opts = rp->pairing_opts;
805 		hdev->max_enc_key_size = rp->max_key_size;
806 	}
807 }
808 
hci_cc_read_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)809 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
810 					   struct sk_buff *skb)
811 {
812 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
813 
814 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 
816 	if (rp->status)
817 		return;
818 
819 	if (test_bit(HCI_INIT, &hdev->flags)) {
820 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
821 		hdev->page_scan_window = __le16_to_cpu(rp->window);
822 	}
823 }
824 
hci_cc_write_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)825 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
826 					    struct sk_buff *skb)
827 {
828 	u8 status = *((u8 *) skb->data);
829 	struct hci_cp_write_page_scan_activity *sent;
830 
831 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
832 
833 	if (status)
834 		return;
835 
836 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
837 	if (!sent)
838 		return;
839 
840 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
841 	hdev->page_scan_window = __le16_to_cpu(sent->window);
842 }
843 
hci_cc_read_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)844 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
845 					   struct sk_buff *skb)
846 {
847 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
848 
849 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850 
851 	if (rp->status)
852 		return;
853 
854 	if (test_bit(HCI_INIT, &hdev->flags))
855 		hdev->page_scan_type = rp->type;
856 }
857 
hci_cc_write_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)858 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
859 					struct sk_buff *skb)
860 {
861 	u8 status = *((u8 *) skb->data);
862 	u8 *type;
863 
864 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
865 
866 	if (status)
867 		return;
868 
869 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
870 	if (type)
871 		hdev->page_scan_type = *type;
872 }
873 
/* Command Complete handler for HCI_Read_Data_Block_Size: record the
 * block-based flow control parameters and initialise the free count.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All data blocks start out available. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
893 
/* Command Complete handler for HCI_Read_Clock: depending on the "which"
 * parameter of the original command, the reply carries either the local
 * clock (stored on hdev) or a piconet clock (stored on the connection).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00: local clock, no connection involved. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise: piconet clock for the connection in the reply. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
928 
/* Command Complete handler for HCI_Read_Local_AMP_Info: cache the AMP
 * controller capabilities (bandwidth, latency, PDU size, flush timeouts)
 * reported by the controller.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
950 
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)951 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
952 					 struct sk_buff *skb)
953 {
954 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
955 
956 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 
958 	if (rp->status)
959 		return;
960 
961 	hdev->inq_tx_power = rp->tx_power;
962 }
963 
hci_cc_read_def_err_data_reporting(struct hci_dev * hdev,struct sk_buff * skb)964 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
965 					       struct sk_buff *skb)
966 {
967 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
968 
969 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
970 
971 	if (rp->status)
972 		return;
973 
974 	hdev->err_data_reporting = rp->err_data_reporting;
975 }
976 
hci_cc_write_def_err_data_reporting(struct hci_dev * hdev,struct sk_buff * skb)977 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
978 						struct sk_buff *skb)
979 {
980 	__u8 status = *((__u8 *)skb->data);
981 	struct hci_cp_write_def_err_data_reporting *cp;
982 
983 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
984 
985 	if (status)
986 		return;
987 
988 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
989 	if (!cp)
990 		return;
991 
992 	hdev->err_data_reporting = cp->err_data_reporting;
993 }
994 
hci_cc_pin_code_reply(struct hci_dev * hdev,struct sk_buff * skb)995 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
996 {
997 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
998 	struct hci_cp_pin_code_reply *cp;
999 	struct hci_conn *conn;
1000 
1001 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1002 
1003 	hci_dev_lock(hdev);
1004 
1005 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1006 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1007 
1008 	if (rp->status)
1009 		goto unlock;
1010 
1011 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1012 	if (!cp)
1013 		goto unlock;
1014 
1015 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1016 	if (conn)
1017 		conn->pin_length = cp->pin_len;
1018 
1019 unlock:
1020 	hci_dev_unlock(hdev);
1021 }
1022 
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * just forward the outcome to mgmt when it is active.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
1037 
hci_cc_le_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)1038 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1039 				       struct sk_buff *skb)
1040 {
1041 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1042 
1043 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1044 
1045 	if (rp->status)
1046 		return;
1047 
1048 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1049 	hdev->le_pkts = rp->le_max_pkt;
1050 
1051 	hdev->le_cnt = hdev->le_pkts;
1052 
1053 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1054 }
1055 
hci_cc_le_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)1056 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1057 					  struct sk_buff *skb)
1058 {
1059 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1060 
1061 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1062 
1063 	if (rp->status)
1064 		return;
1065 
1066 	memcpy(hdev->le_features, rp->features, 8);
1067 }
1068 
hci_cc_le_read_adv_tx_power(struct hci_dev * hdev,struct sk_buff * skb)1069 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1070 					struct sk_buff *skb)
1071 {
1072 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1073 
1074 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1075 
1076 	if (rp->status)
1077 		return;
1078 
1079 	hdev->adv_tx_power = rp->tx_power;
1080 }
1081 
/* Command Complete for HCI_User_Confirmation_Request_Reply: forward the
 * result to the management interface when mgmt is in use.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
1096 
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the result to the management interface when mgmt is in use.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1112 
/* Command Complete for HCI_User_Passkey_Request_Reply: forward the result
 * to the management interface when mgmt is in use.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1127 
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: forward
 * the result to the management interface when mgmt is in use.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1143 
hci_cc_read_local_oob_data(struct hci_dev * hdev,struct sk_buff * skb)1144 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1145 				       struct sk_buff *skb)
1146 {
1147 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1148 
1149 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1150 }
1151 
hci_cc_read_local_oob_ext_data(struct hci_dev * hdev,struct sk_buff * skb)1152 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1153 					   struct sk_buff *skb)
1154 {
1155 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1156 
1157 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1158 }
1159 
/* Command Complete for HCI_LE_Set_Random_Address: remember the address now
 * in use and, if it is our resolvable private address, (re)arm the RPA
 * expiry timer.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* The address just set is the current RPA: clear the expired flag
	 * and schedule the next rotation after rpa_timeout seconds.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}
1186 
/* Command Complete for HCI_LE_Set_Default_PHY: cache the TX/RX PHY
 * preferences that the controller accepted.
 */
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}
1208 
hci_cc_le_set_adv_set_random_addr(struct hci_dev * hdev,struct sk_buff * skb)1209 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1210                                               struct sk_buff *skb)
1211 {
1212 	__u8 status = *((__u8 *) skb->data);
1213 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1214 	struct adv_info *adv;
1215 
1216 	if (status)
1217 		return;
1218 
1219 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1220 	/* Update only in case the adv instance since handle 0x00 shall be using
1221 	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1222 	 * non-extended adverting.
1223 	 */
1224 	if (!cp || !cp->handle)
1225 		return;
1226 
1227 	hci_dev_lock(hdev);
1228 
1229 	adv = hci_find_adv_instance(hdev, cp->handle);
1230 	if (adv) {
1231 		bacpy(&adv->random_addr, &cp->bdaddr);
1232 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1233 			adv->rpa_expired = false;
1234 			queue_delayed_work(hdev->workqueue,
1235 					   &adv->rpa_expired_cb,
1236 					   secs_to_jiffies(hdev->rpa_timeout));
1237 		}
1238 	}
1239 
1240 	hci_dev_unlock(hdev);
1241 }
1242 
hci_cc_le_read_transmit_power(struct hci_dev * hdev,struct sk_buff * skb)1243 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1244 					  struct sk_buff *skb)
1245 {
1246 	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1247 
1248 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1249 
1250 	if (rp->status)
1251 		return;
1252 
1253 	hdev->min_le_tx_power = rp->min_le_tx_power;
1254 	hdev->max_le_tx_power = rp->max_le_tx_power;
1255 }
1256 
/* Command Complete for HCI_LE_Set_Advertising_Enable: track the HCI_LE_ADV
 * flag and arm the connection timeout when advertising was started for an
 * incoming-connection attempt.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1291 
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: update the
 * enabled state of the affected advertising instance(s) and maintain the
 * HCI_LE_ADV device flag accordingly.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	/* The first adv set entry follows the fixed command header */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		/* Arm a timeout for a pending peripheral connection, in
		 * case the expected connection never materialises.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}
1355 
/* Command Complete for HCI_LE_Set_Scan_Parameters: remember the scan type
 * (active/passive) that the controller accepted.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1376 
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: remember the
 * scan type from the first per-PHY parameter block.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* Per-PHY parameters follow the fixed command header */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1401 
has_pending_adv_report(struct hci_dev * hdev)1402 static bool has_pending_adv_report(struct hci_dev *hdev)
1403 {
1404 	struct discovery_state *d = &hdev->discovery;
1405 
1406 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1407 }
1408 
clear_pending_adv_report(struct hci_dev * hdev)1409 static void clear_pending_adv_report(struct hci_dev *hdev)
1410 {
1411 	struct discovery_state *d = &hdev->discovery;
1412 
1413 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1414 	d->last_adv_data_len = 0;
1415 }
1416 
/* Buffer an advertising report in the discovery state so it can be merged
 * with the scan response that is expected to follow.  Oversized payloads
 * are silently ignored.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *state = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&state->last_adv_addr, bdaddr);
	state->last_adv_addr_type = bdaddr_type;
	state->last_adv_flags = flags;
	state->last_adv_rssi = rssi;
	state->last_adv_data_len = len;
	memcpy(state->last_adv_data, data, len);
}
1433 
/* Common completion handling for the legacy and extended LE scan-enable
 * commands: maintain the HCI_LE_SCAN flag, flush any buffered advertising
 * report on disable, and update the discovery state machine.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans buffer reports; start with a clean slate */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1490 
hci_cc_le_set_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)1491 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1492 				      struct sk_buff *skb)
1493 {
1494 	struct hci_cp_le_set_scan_enable *cp;
1495 	__u8 status = *((__u8 *) skb->data);
1496 
1497 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1498 
1499 	if (status)
1500 		return;
1501 
1502 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1503 	if (!cp)
1504 		return;
1505 
1506 	le_set_scan_enable_complete(hdev, cp->enable);
1507 }
1508 
hci_cc_le_set_ext_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)1509 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1510 				      struct sk_buff *skb)
1511 {
1512 	struct hci_cp_le_set_ext_scan_enable *cp;
1513 	__u8 status = *((__u8 *) skb->data);
1514 
1515 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1516 
1517 	if (status)
1518 		return;
1519 
1520 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1521 	if (!cp)
1522 		return;
1523 
1524 	le_set_scan_enable_complete(hdev, cp->enable);
1525 }
1526 
hci_cc_le_read_num_adv_sets(struct hci_dev * hdev,struct sk_buff * skb)1527 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1528 				      struct sk_buff *skb)
1529 {
1530 	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1531 
1532 	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1533 	       rp->num_of_sets);
1534 
1535 	if (rp->status)
1536 		return;
1537 
1538 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1539 }
1540 
hci_cc_le_read_accept_list_size(struct hci_dev * hdev,struct sk_buff * skb)1541 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1542 					    struct sk_buff *skb)
1543 {
1544 	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1545 
1546 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1547 
1548 	if (rp->status)
1549 		return;
1550 
1551 	hdev->le_accept_list_size = rp->size;
1552 }
1553 
/* Command Complete for HCI_LE_Clear_Filter_Accept_List: mirror the clear
 * in the host's copy of the accept list.
 */
static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);
}
1568 
/* Command Complete for HCI_LE_Add_Device_To_Filter_Accept_List: mirror the
 * addition in the host's copy of the accept list.
 */
static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);
}
1589 
/* Command Complete for HCI_LE_Remove_Device_From_Filter_Accept_List:
 * mirror the removal in the host's copy of the accept list.
 */
static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);
}
1610 
hci_cc_le_read_supported_states(struct hci_dev * hdev,struct sk_buff * skb)1611 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1612 					    struct sk_buff *skb)
1613 {
1614 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1615 
1616 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1617 
1618 	if (rp->status)
1619 		return;
1620 
1621 	memcpy(hdev->le_states, rp->le_states, 8);
1622 }
1623 
hci_cc_le_read_def_data_len(struct hci_dev * hdev,struct sk_buff * skb)1624 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1625 					struct sk_buff *skb)
1626 {
1627 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1628 
1629 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1630 
1631 	if (rp->status)
1632 		return;
1633 
1634 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1635 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1636 }
1637 
hci_cc_le_write_def_data_len(struct hci_dev * hdev,struct sk_buff * skb)1638 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1639 					 struct sk_buff *skb)
1640 {
1641 	struct hci_cp_le_write_def_data_len *sent;
1642 	__u8 status = *((__u8 *) skb->data);
1643 
1644 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1645 
1646 	if (status)
1647 		return;
1648 
1649 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1650 	if (!sent)
1651 		return;
1652 
1653 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1654 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1655 }
1656 
/* Command Complete for HCI_LE_Add_Device_To_Resolving_List: mirror the
 * addition (including both IRKs) in the host's copy of the resolving list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
	hci_dev_unlock(hdev);
}
1678 
/* Command Complete for HCI_LE_Remove_Device_From_Resolving_List: mirror
 * the removal in the host's copy of the resolving list.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);
}
1699 
/* Command Complete for HCI_LE_Clear_Resolving_List: mirror the clear in
 * the host's copy of the resolving list.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);
}
1714 
hci_cc_le_read_resolv_list_size(struct hci_dev * hdev,struct sk_buff * skb)1715 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1716 					   struct sk_buff *skb)
1717 {
1718 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1719 
1720 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1721 
1722 	if (rp->status)
1723 		return;
1724 
1725 	hdev->le_resolv_list_size = rp->size;
1726 }
1727 
/* Command Complete for HCI_LE_Set_Address_Resolution_Enable: track whether
 * link-layer (controller-side) RPA resolution is active via the
 * HCI_LL_RPA_RESOLUTION flag.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1751 
hci_cc_le_read_max_data_len(struct hci_dev * hdev,struct sk_buff * skb)1752 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1753 					struct sk_buff *skb)
1754 {
1755 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1756 
1757 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1758 
1759 	if (rp->status)
1760 		return;
1761 
1762 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1763 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1764 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1765 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1766 }
1767 
/* Command Complete for HCI_Write_LE_Host_Supported: keep the host feature
 * bits (page 1) and the HCI_LE_ENABLED/HCI_ADVERTISING flags in sync with
 * what was written to the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* LE disabled implies no LE advertising either */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1801 
/* Command Complete for HCI_LE_Set_Advertising_Parameters: remember which
 * own-address type the controller will advertise with.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1820 
/* Command Complete for HCI_LE_Set_Extended_Advertising_Parameters: record
 * the own-address type, store the selected TX power on the right instance,
 * and refresh the advertising data now that the TX power is known.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}
1851 
/* Command Complete for HCI_Read_RSSI: store the reported RSSI on the
 * connection identified by the handle in the reply.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1870 
/* Command Complete for HCI_Read_Transmit_Power_Level: the sent command's
 * type field selects whether the reply carries the current (0x00) or the
 * maximum (0x01) TX power, so store it in the matching connection field.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1904 
hci_cc_write_ssp_debug_mode(struct hci_dev * hdev,struct sk_buff * skb)1905 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1906 {
1907 	u8 status = *((u8 *) skb->data);
1908 	u8 *mode;
1909 
1910 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1911 
1912 	if (status)
1913 		return;
1914 
1915 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1916 	if (mode)
1917 		hdev->ssp_debug_mode = *mode;
1918 }
1919 
/* Command Status for HCI_Inquiry: on success mark inquiry as in progress;
 * on failure let pending connection attempts proceed.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status) {
		if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
			set_bit(HCI_INQUIRY, &hdev->flags);
		return;
	}

	hci_conn_check_pending(hdev);
}
1932 
/* Command Status for HCI_Create_Connection: on failure tear down (or, for
 * status 0x0c with few attempts, park) the pending connection; on success
 * make sure a conn object exists for the outgoing ACL link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c (Command Disallowed) with a low attempt
			 * count: keep the conn around for a retry instead
			 * of deleting it.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1970 
/* Command Status for HCI_Add_SCO_Connection.  Only the failure path needs
 * handling here: close and delete the SCO conn hanging off the ACL link
 * the command referenced.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	/* The command addressed the ACL connection; the SCO link to tear
	 * down is the one attached to it.
	 */
	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2005 
/* Command Status for HCI_Authentication_Requested.  Only the failure path
 * needs handling: notify and drop a connection still in BT_CONFIG.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2032 
/* Command Status for HCI_Set_Connection_Encryption.  Only the failure path
 * needs handling: notify and drop a connection still in BT_CONFIG.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2059 
/* Decide whether an outgoing connection still in BT_CONFIG must be
 * authenticated before being reported up.  Returns 1 if authentication
 * should be requested, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-level security never requires authentication */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
2081 
hci_resolve_name(struct hci_dev * hdev,struct inquiry_entry * e)2082 static int hci_resolve_name(struct hci_dev *hdev,
2083 				   struct inquiry_entry *e)
2084 {
2085 	struct hci_cp_remote_name_req cp;
2086 
2087 	memset(&cp, 0, sizeof(cp));
2088 
2089 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2090 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2091 	cp.pscan_mode = e->data.pscan_mode;
2092 	cp.clock_offset = e->data.clock_offset;
2093 
2094 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2095 }
2096 
hci_resolve_next_name(struct hci_dev * hdev)2097 static bool hci_resolve_next_name(struct hci_dev *hdev)
2098 {
2099 	struct discovery_state *discov = &hdev->discovery;
2100 	struct inquiry_entry *e;
2101 
2102 	if (list_empty(&discov->resolve))
2103 		return false;
2104 
2105 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2106 	if (!e)
2107 		return false;
2108 
2109 	if (hci_resolve_name(hdev, e) == 0) {
2110 		e->name_state = NAME_PENDING;
2111 		return true;
2112 	}
2113 
2114 	return false;
2115 }
2116 
/* Process a resolved (or failed) remote name while mgmt-based discovery
 * may be waiting on it.
 *
 * @conn:     may be NULL; when present and actually connected it is
 *            flagged mgmt-connected and the name forwarded via
 *            mgmt_device_connected()
 * @name:     NULL when the Remote Name Request failed
 * @name_len: only meaningful when @name is set
 *
 * Afterwards the discovery state machine is advanced: the matching
 * NAME_PENDING cache entry is retired, the next resolution is started,
 * or discovery is stopped when nothing is left to resolve.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was already requested; finish it now that the pending
	 * name request has completed.
	 */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep discovery in RESOLVING as long as another name request
	 * could be queued.
	 */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2165 
/* Handle Command Status for HCI_Remote_Name_Request.
 *
 * On success nothing is done here; the Remote Name Request Complete
 * event drives both discovery and any pending authentication.  On
 * failure the discovery state machine is advanced with a NULL name and,
 * if the connection still needs it, authentication is started directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* hci_check_pending_name() copes with a NULL conn */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2208 
/* Handle Command Status for HCI_Read_Remote_Supported_Features.
 *
 * A failure during link configuration aborts the setup: the upper
 * layers are informed and the setup reference on the connection is
 * released.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2235 
/* Handle Command Status for HCI_Read_Remote_Extended_Features.
 *
 * Mirrors hci_cs_read_remote_features(): a failure while the link is
 * still in BT_CONFIG terminates the setup.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2262 
/* Handle Command Status for HCI_Setup_Synchronous_Connection.
 *
 * On failure the SCO/eSCO link hanging off the parent ACL connection
 * is torn down and the upper layers are notified.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* cp->handle refers to the ACL link the sync connection rides on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	sco = acl ? acl->link : NULL;
	if (sco) {
		sco->state = BT_CLOSED;

		hci_connect_cfm(sco, status);
		hci_conn_del(sco);
	}

	hci_dev_unlock(hdev);
}
2297 
/* Handle Command Status for HCI_Sniff_Mode.
 *
 * A failure means no Mode Change event will follow, so the pending
 * mode-change flag must be cleared and any SCO setup that was waiting
 * on the mode change is resumed with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
2324 
/* Handle Command Status for HCI_Exit_Sniff_Mode.
 *
 * Mirrors hci_cs_sniff_mode(): on failure clear the pending
 * mode-change flag and resume any deferred SCO setup.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
2351 
/* Handle Command Status for HCI_Disconnect.
 *
 * Only a failing status needs handling: mgmt is told the disconnect
 * failed and the connection object is cleaned up locally, since the
 * upper layers will not retry the disconnect.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* The LE connection may have suppressed advertising;
		 * restore the advertising instance it was using.
		 */
		if (conn->type == LE_LINK) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_req_reenable_advertising(hdev);
		}

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
	}

	hci_dev_unlock(hdev);
}
2386 
/* Common Command Status bookkeeping for LE_Create_Connection and
 * LE_Extended_Create_Connection.
 *
 * Records the initiator/responder addresses needed later by SMP and
 * arms the connection-attempt timeout for directed (non accept list)
 * connects.  Must be called with hdev locked.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2437 
/* Handle Command Status for HCI_LE_Create_Connection.
 *
 * Success only: record the address information needed for SMP and arm
 * the connect timeout via cs_le_create_conn().
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);
	hci_dev_unlock(hdev);
}
2462 
/* Handle Command Status for HCI_LE_Extended_Create_Connection.
 *
 * Same handling as hci_cs_le_create_conn(), just with the extended
 * command's parameter layout.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);
	hci_dev_unlock(hdev);
}
2487 
/* Handle Command Status for HCI_LE_Read_Remote_Features.
 *
 * A failure while the LE link is still in BT_CONFIG aborts the setup:
 * upper layers are notified and the setup reference released.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
2514 
/* Handle Command Status for HCI_LE_Start_Encryption.
 *
 * If the controller refuses to start encryption on an established link
 * there is no way to keep it secure, so the connection is terminated
 * with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (cp) {
		conn = hci_conn_hash_lookup_handle(hdev,
						   __le16_to_cpu(cp->handle));
		if (conn && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2544 
/* Handle Command Status for HCI_Switch_Role.
 *
 * A failing status means no Role Change event will follow, so the
 * pending role-switch flag must be cleared here.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		goto unlock;

	clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
2567 
hci_inquiry_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2568 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2569 {
2570 	__u8 status = *((__u8 *) skb->data);
2571 	struct discovery_state *discov = &hdev->discovery;
2572 	struct inquiry_entry *e;
2573 
2574 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2575 
2576 	hci_conn_check_pending(hdev);
2577 
2578 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2579 		return;
2580 
2581 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2582 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2583 
2584 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2585 		return;
2586 
2587 	hci_dev_lock(hdev);
2588 
2589 	if (discov->state != DISCOVERY_FINDING)
2590 		goto unlock;
2591 
2592 	if (list_empty(&discov->resolve)) {
2593 		/* When BR/EDR inquiry is active and no LE scanning is in
2594 		 * progress, then change discovery state to indicate completion.
2595 		 *
2596 		 * When running LE scanning and BR/EDR inquiry simultaneously
2597 		 * and the LE scan already finished, then change the discovery
2598 		 * state to indicate completion.
2599 		 */
2600 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2601 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2602 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2603 		goto unlock;
2604 	}
2605 
2606 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2607 	if (e && hci_resolve_name(hdev, e) == 0) {
2608 		e->name_state = NAME_PENDING;
2609 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2610 	} else {
2611 		/* When BR/EDR inquiry is active and no LE scanning is in
2612 		 * progress, then change discovery state to indicate completion.
2613 		 *
2614 		 * When running LE scanning and BR/EDR inquiry simultaneously
2615 		 * and the LE scan already finished, then change the discovery
2616 		 * state to indicate completion.
2617 		 */
2618 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2619 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2620 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2621 	}
2622 
2623 unlock:
2624 	hci_dev_unlock(hdev);
2625 }
2626 
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)2627 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 {
2629 	struct inquiry_data data;
2630 	struct inquiry_info *info = (void *) (skb->data + 1);
2631 	int num_rsp = *((__u8 *) skb->data);
2632 
2633 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2634 
2635 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2636 		return;
2637 
2638 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2639 		return;
2640 
2641 	hci_dev_lock(hdev);
2642 
2643 	for (; num_rsp; num_rsp--, info++) {
2644 		u32 flags;
2645 
2646 		bacpy(&data.bdaddr, &info->bdaddr);
2647 		data.pscan_rep_mode	= info->pscan_rep_mode;
2648 		data.pscan_period_mode	= info->pscan_period_mode;
2649 		data.pscan_mode		= info->pscan_mode;
2650 		memcpy(data.dev_class, info->dev_class, 3);
2651 		data.clock_offset	= info->clock_offset;
2652 		data.rssi		= HCI_RSSI_INVALID;
2653 		data.ssp_mode		= 0x00;
2654 
2655 		flags = hci_inquiry_cache_update(hdev, &data, false);
2656 
2657 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2658 				  info->dev_class, HCI_RSSI_INVALID,
2659 				  flags, NULL, 0, NULL, 0);
2660 	}
2661 
2662 	hci_dev_unlock(hdev);
2663 }
2664 
/* Handle HCI Connection Complete event (BR/EDR ACL and SCO links).
 *
 * Looks up (or, for auto-connects and downgraded eSCO setups, creates
 * or reuses) the matching hci_conn, then either completes link setup
 * or tears the connection down on error.
 *
 * NOTE(review): a duplicate Connection Complete for a connection that
 * already finished setup is not explicitly rejected here; later
 * kernels add such a guard — confirm whether this tree needs it.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* An unknown SCO address may still match a pending
			 * eSCO setup that the controller downgraded to SCO.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Use the longer pairing timeout when an incoming
			 * legacy (non-SSP) peer has no stored link key yet.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2779 
/* Reject an incoming connection request from @bdaddr with
 * "unacceptable BD_ADDR".
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp = {
		.reason = HCI_ERROR_REJ_BAD_ADDR,
	};

	bacpy(&cp.bdaddr, bdaddr);

	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2788 
/* Handle HCI Connection Request event.
 *
 * Applies the reject policies (same-BD_ADDR spoofing per
 * CVE-2020-26555, protocol veto, reject list, mgmt connectable/accept
 * list), then accepts the ACL or sync connection — or defers the
 * decision to the upper layer when HCI_PROTO_DEFER is set.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Give L2CAP/SCO a chance to veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the request */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: let the upper layer decide via connect_cfm */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
2896 
/* Map an HCI disconnect reason code to the corresponding mgmt
 * disconnect reason.  Unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
static u8 hci_to_mgmt_reason(u8 err)
{
	if (err == HCI_ERROR_CONNECTION_TIMEOUT)
		return MGMT_DEV_DISCONN_TIMEOUT;

	if (err == HCI_ERROR_REMOTE_USER_TERM ||
	    err == HCI_ERROR_REMOTE_LOW_RESOURCES ||
	    err == HCI_ERROR_REMOTE_POWER_OFF)
		return MGMT_DEV_DISCONN_REMOTE;

	if (err == HCI_ERROR_LOCAL_HOST_TERM)
		return MGMT_DEV_DISCONN_LOCAL_HOST;

	return MGMT_DEV_DISCONN_UNKNOWN;
}
2912 
/* Handle HCI Disconnection Complete event.
 *
 * Notifies mgmt, removes flushable link keys, requeues auto-connect
 * parameters, informs upper layers, wakes a pending suspend waiter and
 * re-enables LE advertising before deleting the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* A failed disconnect leaves the connection alive */
	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Requeue auto-connect entries so the device reconnects */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_req_reenable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3004 
/* Handle HCI Authentication Complete event.
 *
 * Updates the authentication state of the connection, continues link
 * setup (starting encryption for SSP links) and kicks off any
 * encryption request that was queued behind the authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember key-missing failures for mgmt reporting */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links must also be encrypted before setup completes */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been queued behind this
	 * authentication; issue it now or fail it with this status.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3067 
/* Handle HCI Remote Name Request Complete event.
 *
 * Feeds the (possibly failed) name into the mgmt discovery machinery
 * and, for an outgoing connection that still needs it, starts
 * authentication now that the name phase is over.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* ev->name is not guaranteed to be NUL terminated; bound it */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3109 
/* Completion callback for HCI_Read_Encryption_Key_Size.
 *
 * Records the negotiated key size on the connection and then continues
 * the deferred encryption-change handling.  A failed read is treated
 * as key size 0, which makes the key-size policy checks reject the
 * link (forcing a disconnect) rather than trusting an unknown key.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	/* Response must at least carry status + handle + key_size */
	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
3150 
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's security flags to match the new encryption
 * state, enforces the link-security requirements, and either tears the
 * link down on failure or confirms encryption to the upper layers. For
 * encrypted ACL links it additionally queries the negotiated encryption
 * key size (when the controller supports the command) before notifying.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete(). */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3264 
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)3265 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3266 					     struct sk_buff *skb)
3267 {
3268 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3269 	struct hci_conn *conn;
3270 
3271 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3272 
3273 	hci_dev_lock(hdev);
3274 
3275 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3276 	if (conn) {
3277 		if (!ev->status)
3278 			set_bit(HCI_CONN_SECURE, &conn->flags);
3279 
3280 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3281 
3282 		hci_key_change_cfm(conn, ev->status);
3283 	}
3284 
3285 	hci_dev_unlock(hdev);
3286 }
3287 
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask and continues the connection
 * setup sequence: read extended features when both sides support them,
 * otherwise resolve the remote name (or report the connection to mgmt),
 * and finally complete the connection if no authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only continue setup for connections still being configured. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both controller and remote must support extended features
	 * before page 1 can be requested.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* No outgoing authentication required: the connection is done. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3336 
/* Update the command credit counter and the related watchdog timers
 * based on the ncmd value carried by a Command Complete/Status event.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	/* During HCI_RESET the counter is left alone. */
	if (test_bit(HCI_RESET, &hdev->flags))
		return;

	if (!ncmd) {
		/* Controller reports no free command slots; arm the
		 * ncmd watchdog so a stuck controller gets noticed.
		 */
		schedule_delayed_work(&hdev->ncmd_timer, HCI_NCMD_TIMEOUT);
		return;
	}

	cancel_delayed_work(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);
}
3351 
/* Handle HCI Command Complete event.
 *
 * Extracts the completed opcode and its status, dispatches the response
 * payload to the matching hci_cc_* handler, refreshes the command
 * credit accounting, completes any waiting hci_request and finally
 * schedules transmission of the next queued command.
 *
 * @opcode/@status are out-parameters consumed by the caller;
 * @req_complete/@req_complete_skb receive the request callbacks (if
 * any) resolved by hci_req_cmd_complete().
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the status code. */
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so handlers see only the response. */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_filter(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
		hci_cc_le_read_accept_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_ACCEPT_LIST:
		hci_cc_le_clear_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
		hci_cc_le_add_to_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
		hci_cc_le_del_from_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_READ_TRANSMIT_POWER:
		hci_cc_le_read_transmit_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Refresh command credits / watchdog based on ev->ncmd. */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* A vendor command is still outstanding; do not send more. */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Credits available and commands queued: resume transmission. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3728 
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler, refreshes the
 * command credit accounting, completes the waiting hci_request where
 * appropriate and schedules transmission of the next queued command.
 *
 * @opcode/@status are out-parameters consumed by the caller;
 * @req_complete/@req_complete_skb receive the request callbacks (if
 * any) resolved by hci_req_cmd_complete().
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Refresh command credits / watchdog based on ev->ncmd. */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* A vendor command is still outstanding; do not send more. */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Credits available and commands queued: resume transmission. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3837 
hci_hardware_error_evt(struct hci_dev * hdev,struct sk_buff * skb)3838 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3839 {
3840 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3841 
3842 	hdev->hw_error_code = ev->code;
3843 
3844 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3845 }
3846 
hci_role_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3847 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3848 {
3849 	struct hci_ev_role_change *ev = (void *) skb->data;
3850 	struct hci_conn *conn;
3851 
3852 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3853 
3854 	hci_dev_lock(hdev);
3855 
3856 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3857 	if (conn) {
3858 		if (!ev->status)
3859 			conn->role = ev->role;
3860 
3861 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3862 
3863 		hci_role_switch_cfm(conn, ev->status, ev->role);
3864 	}
3865 
3866 	hci_dev_unlock(hdev);
3867 }
3868 
/* Handle HCI Number Of Completed Packets event (packet-based flow
 * control).
 *
 * For each reported handle, credits the completed packet count back to
 * the per-link-type transmit quota (clamped to the controller's
 * advertised maximum) and then kicks the TX work to send more data.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject events whose claimed handle count exceeds the payload. */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3935 
/* Resolve a connection handle to its hci_conn, honouring the device
 * type: on a primary controller the handle names a connection directly,
 * on an AMP controller it names a logical channel whose parent
 * connection is returned. Returns NULL when nothing matches.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	if (hdev->dev_type == HCI_PRIMARY)
		return hci_conn_hash_lookup_handle(hdev, handle);

	if (hdev->dev_type == HCI_AMP) {
		chan = hci_chan_lookup_handle(hdev, handle);
		return chan ? chan->conn : NULL;
	}

	bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
	return NULL;
}
3956 
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).
 *
 * For each reported handle, credits the completed block count back to
 * the shared block quota (clamped to the controller's advertised
 * maximum) and then kicks the TX work to send more data.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject events whose claimed handle count exceeds the payload. */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may be a connection or an AMP logical channel. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4007 
hci_mode_change_evt(struct hci_dev * hdev,struct sk_buff * skb)4008 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4009 {
4010 	struct hci_ev_mode_change *ev = (void *) skb->data;
4011 	struct hci_conn *conn;
4012 
4013 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4014 
4015 	hci_dev_lock(hdev);
4016 
4017 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4018 	if (conn) {
4019 		conn->mode = ev->mode;
4020 
4021 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4022 					&conn->flags)) {
4023 			if (conn->mode == HCI_CM_ACTIVE)
4024 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4025 			else
4026 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4027 		}
4028 
4029 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4030 			hci_sco_setup(conn, ev->status);
4031 	}
4032 
4033 	hci_dev_unlock(hdev);
4034 }
4035 
hci_pin_code_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4036 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4037 {
4038 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
4039 	struct hci_conn *conn;
4040 
4041 	BT_DBG("%s", hdev->name);
4042 
4043 	hci_dev_lock(hdev);
4044 
4045 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4046 	if (!conn)
4047 		goto unlock;
4048 
4049 	if (conn->state == BT_CONNECTED) {
4050 		hci_conn_hold(conn);
4051 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4052 		hci_conn_drop(conn);
4053 	}
4054 
4055 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4056 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4057 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4058 			     sizeof(ev->bdaddr), &ev->bdaddr);
4059 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4060 		u8 secure;
4061 
4062 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4063 			secure = 1;
4064 		else
4065 			secure = 0;
4066 
4067 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4068 	}
4069 
4070 unlock:
4071 	hci_dev_unlock(hdev);
4072 }
4073 
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level it can satisfy. Changed-combination keys
 * carry no new security information and are ignored entirely.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_COMBINATION:
		/* Legacy PIN pairing: only a full 16-digit PIN rates high. */
		conn->pending_sec_level = (pin_len == 16) ?
					  BT_SECURITY_HIGH :
					  BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	default:
		/* Unit and debug keys leave the security level untouched. */
		break;
	}
}
4105 
hci_link_key_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4106 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4107 {
4108 	struct hci_ev_link_key_req *ev = (void *) skb->data;
4109 	struct hci_cp_link_key_reply cp;
4110 	struct hci_conn *conn;
4111 	struct link_key *key;
4112 
4113 	BT_DBG("%s", hdev->name);
4114 
4115 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4116 		return;
4117 
4118 	hci_dev_lock(hdev);
4119 
4120 	key = hci_find_link_key(hdev, &ev->bdaddr);
4121 	if (!key) {
4122 		BT_DBG("%s link key not found for %pMR", hdev->name,
4123 		       &ev->bdaddr);
4124 		goto not_found;
4125 	}
4126 
4127 	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4128 	       &ev->bdaddr);
4129 
4130 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4131 	if (conn) {
4132 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4133 
4134 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4135 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4136 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4137 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
4138 			goto not_found;
4139 		}
4140 
4141 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4142 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4143 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4144 			BT_DBG("%s ignoring key unauthenticated for high security",
4145 			       hdev->name);
4146 			goto not_found;
4147 		}
4148 
4149 		conn_set_key(conn, key->type, key->pin_len);
4150 	}
4151 
4152 	bacpy(&cp.bdaddr, &ev->bdaddr);
4153 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4154 
4155 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4156 
4157 	hci_dev_unlock(hdev);
4158 
4159 	return;
4160 
4161 not_found:
4162 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4163 	hci_dev_unlock(hdev);
4164 }
4165 
/* Handle HCI Link Key Notification event.
 *
 * Validates and stores a newly created link key for the connection,
 * notifies mgmt of the new key, and decides whether the key should
 * persist past the connection (dropping debug keys unless explicitly
 * allowed). All-zero keys are rejected outright (CVE-2020-26555).
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Keep the connection around long enough for key storage to
	 * complete.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is no key storage to update. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down. */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4234 
hci_clock_offset_evt(struct hci_dev * hdev,struct sk_buff * skb)4235 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4236 {
4237 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4238 	struct hci_conn *conn;
4239 
4240 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4241 
4242 	hci_dev_lock(hdev);
4243 
4244 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4245 	if (conn && !ev->status) {
4246 		struct inquiry_entry *ie;
4247 
4248 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4249 		if (ie) {
4250 			ie->data.clock_offset = ev->clock_offset;
4251 			ie->timestamp = jiffies;
4252 		}
4253 	}
4254 
4255 	hci_dev_unlock(hdev);
4256 }
4257 
hci_pkt_type_change_evt(struct hci_dev * hdev,struct sk_buff * skb)4258 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4259 {
4260 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4261 	struct hci_conn *conn;
4262 
4263 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4264 
4265 	hci_dev_lock(hdev);
4266 
4267 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4268 	if (conn && !ev->status)
4269 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4270 
4271 	hci_dev_unlock(hdev);
4272 }
4273 
hci_pscan_rep_mode_evt(struct hci_dev * hdev,struct sk_buff * skb)4274 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4275 {
4276 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4277 	struct inquiry_entry *ie;
4278 
4279 	BT_DBG("%s", hdev->name);
4280 
4281 	hci_dev_lock(hdev);
4282 
4283 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4284 	if (ie) {
4285 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4286 		ie->timestamp = jiffies;
4287 	}
4288 
4289 	hci_dev_unlock(hdev);
4290 }
4291 
/* Handle HCI Inquiry Result with RSSI event.
 *
 * This event comes in two wire formats: one that includes the page
 * scan mode of each response and one that does not.  The format is
 * inferred from the per-response record size; each record is fed into
 * the inquiry cache and reported to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported individually */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two event layouts by the record size implied
	 * by the payload length and response count.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		/* Reject truncated packets to avoid reading past the skb */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		/* Reject truncated packets to avoid reading past the skb */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
4362 
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Caches the reported feature page on the connection, mirrors the
 * remote host's SSP/SC support bits (page 0x01) into the connection
 * flags and inquiry cache, and continues connection setup: either by
 * requesting the remote name or by completing the connection.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the page, guarding against out-of-range page numbers */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 0x01 carries the remote host's feature bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Not yet reported to mgmt: fetch the remote name first;
	 * otherwise mark the device as connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* If no outgoing authentication is pending, finish the setup */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4426 
/* Handle HCI Synchronous Connection Complete event.
 *
 * Finalizes setup of a SCO/eSCO link: on success the connection is
 * registered, on a known negotiation failure an outgoing attempt is
 * retried with a reduced packet type mask, and on any other error the
 * connection is closed.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		/* The synchronous connection complete event should only be
		 * sent once per new connection. Receiving a successful
		 * complete event when the connection status is already
		 * BT_CONNECTED means that the device is misbehaving and sent
		 * multiple complete event packets for the same new connection.
		 *
		 * Registering the device more than once can corrupt kernel
		 * memory, hence upon detecting this invalid event, we report
		 * an error and ignore the packet.
		 */
		if (conn->state == BT_CONNECTED) {
			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
			goto unlock;
		}

		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the outgoing setup with the eSCO/EDR packet
			 * types supported by the controller; if the retry
			 * was submitted leave the connection state as is.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

	/* Tell the driver which SCO air mode is in use so it can set up
	 * audio routing (CVSD vs. transparent).
	 */
	switch (ev->air_mode) {
	case 0x02:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
		break;
	case 0x03:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4535 
/* Walk the length-prefixed EIR fields and return the number of bytes
 * of significant data, i.e. everything up to (but excluding) the first
 * zero-length field.  If no terminating field is found within eir_len
 * bytes, the whole buffer length is returned.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[0];

		if (!field_len)
			return offset;

		offset += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}
4552 
hci_extended_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)4553 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4554 					    struct sk_buff *skb)
4555 {
4556 	struct inquiry_data data;
4557 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
4558 	int num_rsp = *((__u8 *) skb->data);
4559 	size_t eir_len;
4560 
4561 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4562 
4563 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4564 		return;
4565 
4566 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4567 		return;
4568 
4569 	hci_dev_lock(hdev);
4570 
4571 	for (; num_rsp; num_rsp--, info++) {
4572 		u32 flags;
4573 		bool name_known;
4574 
4575 		bacpy(&data.bdaddr, &info->bdaddr);
4576 		data.pscan_rep_mode	= info->pscan_rep_mode;
4577 		data.pscan_period_mode	= info->pscan_period_mode;
4578 		data.pscan_mode		= 0x00;
4579 		memcpy(data.dev_class, info->dev_class, 3);
4580 		data.clock_offset	= info->clock_offset;
4581 		data.rssi		= info->rssi;
4582 		data.ssp_mode		= 0x01;
4583 
4584 		if (hci_dev_test_flag(hdev, HCI_MGMT))
4585 			name_known = eir_get_data(info->data,
4586 						  sizeof(info->data),
4587 						  EIR_NAME_COMPLETE, NULL);
4588 		else
4589 			name_known = true;
4590 
4591 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
4592 
4593 		eir_len = eir_get_length(info->data, sizeof(info->data));
4594 
4595 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4596 				  info->dev_class, info->rssi,
4597 				  flags, info->data, eir_len, NULL, 0);
4598 	}
4599 
4600 	hci_dev_unlock(hdev);
4601 }
4602 
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links; promotes the pending security level on
 * success, tears the link down on failure, and notifies the pending
 * connect/auth waiters.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Refresh failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timeout; the hold/drop pair keeps
		 * the connection refcount balanced.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4652 
hci_get_auth_req(struct hci_conn * conn)4653 static u8 hci_get_auth_req(struct hci_conn *conn)
4654 {
4655 	/* If remote requests no-bonding follow that lead */
4656 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4657 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4658 		return conn->remote_auth | (conn->auth_type & 0x01);
4659 
4660 	/* If both remote and local have enough IO capabilities, require
4661 	 * MITM protection
4662 	 */
4663 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4664 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4665 		return conn->remote_auth | 0x01;
4666 
4667 	/* No MITM protection possible so ignore remote requirement */
4668 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4669 }
4670 
bredr_oob_data_present(struct hci_conn * conn)4671 static u8 bredr_oob_data_present(struct hci_conn *conn)
4672 {
4673 	struct hci_dev *hdev = conn->hdev;
4674 	struct oob_data *data;
4675 
4676 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4677 	if (!data)
4678 		return 0x00;
4679 
4680 	if (bredr_sc_enabled(hdev)) {
4681 		/* When Secure Connections is enabled, then just
4682 		 * return the present value stored with the OOB
4683 		 * data. The stored value contains the right present
4684 		 * information. However it can only be trusted when
4685 		 * not in Secure Connection Only mode.
4686 		 */
4687 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4688 			return data->present;
4689 
4690 		/* When Secure Connections Only mode is enabled, then
4691 		 * the P-256 values are required. If they are not
4692 		 * available, then do not declare that OOB data is
4693 		 * present.
4694 		 */
4695 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4696 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
4697 			return 0x00;
4698 
4699 		return 0x02;
4700 	}
4701 
4702 	/* When Secure Connections is not enabled or actually
4703 	 * not supported by the hardware, then check that if
4704 	 * P-192 data values are present.
4705 	 */
4706 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4707 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
4708 		return 0x00;
4709 
4710 	return 0x01;
4711 }
4712 
/* Handle HCI IO Capability Request event.
 *
 * Decide whether pairing with the remote device is allowed and answer
 * with either an IO Capability Reply (including our capability,
 * authentication requirements and OOB data presence) or a negative
 * reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not allowed: reject the request */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4781 
hci_io_capa_reply_evt(struct hci_dev * hdev,struct sk_buff * skb)4782 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4783 {
4784 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4785 	struct hci_conn *conn;
4786 
4787 	BT_DBG("%s", hdev->name);
4788 
4789 	hci_dev_lock(hdev);
4790 
4791 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4792 	if (!conn)
4793 		goto unlock;
4794 
4795 	conn->remote_cap = ev->capability;
4796 	conn->remote_auth = ev->authentication;
4797 
4798 unlock:
4799 	hci_dev_unlock(hdev);
4800 }
4801 
/* Handle HCI User Confirmation Request event.
 *
 * Applies the MITM policy for numeric-comparison pairing: reject when
 * required MITM protection cannot be provided, auto-accept (possibly
 * after a configurable delay) when neither side needs MITM, and
 * otherwise forward the request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* MITM bits of the local and remote authentication requirements */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a workqueue item */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4886 
hci_user_passkey_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4887 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4888 					 struct sk_buff *skb)
4889 {
4890 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4891 
4892 	BT_DBG("%s", hdev->name);
4893 
4894 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4895 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4896 }
4897 
hci_user_passkey_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4898 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4899 					struct sk_buff *skb)
4900 {
4901 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4902 	struct hci_conn *conn;
4903 
4904 	BT_DBG("%s", hdev->name);
4905 
4906 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4907 	if (!conn)
4908 		return;
4909 
4910 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4911 	conn->passkey_entered = 0;
4912 
4913 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4914 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4915 					 conn->dst_type, conn->passkey_notify,
4916 					 conn->passkey_entered);
4917 }
4918 
hci_keypress_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4919 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4920 {
4921 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4922 	struct hci_conn *conn;
4923 
4924 	BT_DBG("%s", hdev->name);
4925 
4926 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4927 	if (!conn)
4928 		return;
4929 
4930 	switch (ev->type) {
4931 	case HCI_KEYPRESS_STARTED:
4932 		conn->passkey_entered = 0;
4933 		return;
4934 
4935 	case HCI_KEYPRESS_ENTERED:
4936 		conn->passkey_entered++;
4937 		break;
4938 
4939 	case HCI_KEYPRESS_ERASED:
4940 		conn->passkey_entered--;
4941 		break;
4942 
4943 	case HCI_KEYPRESS_CLEARED:
4944 		conn->passkey_entered = 0;
4945 		break;
4946 
4947 	case HCI_KEYPRESS_COMPLETED:
4948 		return;
4949 	}
4950 
4951 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4952 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4953 					 conn->dst_type, conn->passkey_notify,
4954 					 conn->passkey_entered);
4955 }
4956 
hci_simple_pair_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4957 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4958 					 struct sk_buff *skb)
4959 {
4960 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4961 	struct hci_conn *conn;
4962 
4963 	BT_DBG("%s", hdev->name);
4964 
4965 	hci_dev_lock(hdev);
4966 
4967 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4968 	if (!conn || !hci_conn_ssp_enabled(conn))
4969 		goto unlock;
4970 
4971 	/* Reset the authentication requirement to unknown */
4972 	conn->remote_auth = 0xff;
4973 
4974 	/* To avoid duplicate auth_failed events to user space we check
4975 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4976 	 * initiated the authentication. A traditional auth_complete
4977 	 * event gets always produced as initiator and is also mapped to
4978 	 * the mgmt_auth_failed event */
4979 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4980 		mgmt_auth_failed(conn, ev->status);
4981 
4982 	hci_conn_drop(conn);
4983 
4984 unlock:
4985 	hci_dev_unlock(hdev);
4986 }
4987 
hci_remote_host_features_evt(struct hci_dev * hdev,struct sk_buff * skb)4988 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4989 					 struct sk_buff *skb)
4990 {
4991 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4992 	struct inquiry_entry *ie;
4993 	struct hci_conn *conn;
4994 
4995 	BT_DBG("%s", hdev->name);
4996 
4997 	hci_dev_lock(hdev);
4998 
4999 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5000 	if (conn)
5001 		memcpy(conn->features[1], ev->features, 8);
5002 
5003 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5004 	if (ie)
5005 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5006 
5007 	hci_dev_unlock(hdev);
5008 }
5009 
/* Handle HCI Remote OOB Data Request event.
 *
 * Answers with the stored out-of-band pairing data for the remote
 * device: the extended (P-192 + P-256) reply when Secure Connections
 * is enabled, the legacy P-192 reply otherwise, or a negative reply
 * when no OOB data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode only the P-256 values
		 * may be used, so the P-192 fields are zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only P-192 data is sent */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5063 
5064 #if IS_ENABLED(CONFIG_BT_HS)
hci_chan_selected_evt(struct hci_dev * hdev,struct sk_buff * skb)5065 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5066 {
5067 	struct hci_ev_channel_selected *ev = (void *)skb->data;
5068 	struct hci_conn *hcon;
5069 
5070 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5071 
5072 	skb_pull(skb, sizeof(*ev));
5073 
5074 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5075 	if (!hcon)
5076 		return;
5077 
5078 	amp_read_loc_assoc_final_data(hdev, hcon);
5079 }
5080 
/* Handle HCI Physical Link Complete event (AMP).
 *
 * On success, marks the AMP physical link connected, copies the peer
 * address from the underlying BR/EDR connection and confirms the link
 * to the AMP manager; on error the link is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link inherits the peer address from the BR/EDR
	 * connection the AMP manager is attached to.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Refresh the disconnect timeout; the hold/drop pair keeps the
	 * connection refcount balanced.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5121 
/* Handle HCI Logical Link Complete event (AMP).
 *
 * Creates the HCI channel for the new logical link and, if an L2CAP
 * channel is waiting on the AMP manager, confirms the logical link to
 * it.  Note: unlike most event handlers this one does not take
 * hci_dev_lock; it locks the L2CAP channel instead.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the L2CAP connection MTU to the AMP block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5160 
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5161 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5162 					     struct sk_buff *skb)
5163 {
5164 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5165 	struct hci_chan *hchan;
5166 
5167 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5168 	       le16_to_cpu(ev->handle), ev->status);
5169 
5170 	if (ev->status)
5171 		return;
5172 
5173 	hci_dev_lock(hdev);
5174 
5175 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5176 	if (!hchan || !hchan->amp)
5177 		goto unlock;
5178 
5179 	amp_destroy_logical_link(hchan, ev->reason);
5180 
5181 unlock:
5182 	hci_dev_unlock(hdev);
5183 }
5184 
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5185 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5186 					     struct sk_buff *skb)
5187 {
5188 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5189 	struct hci_conn *hcon;
5190 
5191 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5192 
5193 	if (ev->status)
5194 		return;
5195 
5196 	hci_dev_lock(hdev);
5197 
5198 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5199 	if (hcon && hcon->type == AMP_LINK) {
5200 		hcon->state = BT_CLOSED;
5201 		hci_disconn_cfm(hcon, ev->reason);
5202 		hci_conn_del(hcon);
5203 	}
5204 
5205 	hci_dev_unlock(hdev);
5206 }
5207 #endif
5208 
/* Record the initiator and responder addresses of a new LE connection.
 *
 * For outgoing connections the peer is the responder; for incoming ones
 * the local device is. If the controller provided a Local RPA it takes
 * precedence over hdev->rpa / the configured advertising address.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	struct hci_dev *hdev = conn->hdev;

	if (conn->out) {
		/* We initiated: peer address is the responder address. */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* A non-zero Local RPA from the controller must be used
		 * instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &hdev->rpa);
		} else {
			hci_copy_identity_address(hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
		return;
	}

	/* Incoming connection: we are the responder. */
	conn->resp_addr_type = hdev->adv_addr_type;

	/* A non-zero Local RPA from the controller must be used instead
	 * of hdev->rpa.
	 */
	if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
		conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(&conn->resp_addr, local_rpa);
	} else if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
		/* In case of ext adv, resp_addr will be updated in
		 * Adv Terminated event.
		 */
		if (!ext_adv_capable(hdev))
			bacpy(&conn->resp_addr, &hdev->random_addr);
	} else {
		bacpy(&conn->resp_addr, &hdev->bdaddr);
	}

	conn->init_addr_type = bdaddr_type;
	bacpy(&conn->init_addr, bdaddr);

	/* For incoming connections, set the default minimum
	 * and maximum connection interval. They will be used
	 * to check if the parameters are in range and if not
	 * trigger the connection update procedure.
	 */
	conn->le_conn_min_interval = hdev->le_conn_min_interval;
	conn->le_conn_max_interval = hdev->le_conn_max_interval;
}
5261 
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events.
 *
 * Looks up (or creates) the hci_conn, records the initiator/responder
 * addresses, resolves the peer's identity address, and either fails the
 * connection (non-zero status) or transitions it towards BT_CONNECTED,
 * optionally kicking off the remote features exchange first.
 *
 * @local_rpa: controller-provided Local RPA, or NULL for the legacy event.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt completed; stop its timeout timer. */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (conn->dst_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			conn->dst_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			conn->dst_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify userspace (mgmt) exactly once per connection. */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Keep a reference until the features exchange completes. */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt succeeded; drop the extra
	 * reference held by the connection parameters entry.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5424 
hci_le_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5425 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5426 {
5427 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5428 
5429 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5430 
5431 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5432 			     NULL, ev->role, le16_to_cpu(ev->handle),
5433 			     le16_to_cpu(ev->interval),
5434 			     le16_to_cpu(ev->latency),
5435 			     le16_to_cpu(ev->supervision_timeout));
5436 }
5437 
hci_le_enh_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5438 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5439 					 struct sk_buff *skb)
5440 {
5441 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5442 
5443 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5444 
5445 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5446 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5447 			     le16_to_cpu(ev->interval),
5448 			     le16_to_cpu(ev->latency),
5449 			     le16_to_cpu(ev->supervision_timeout));
5450 
5451 	if (use_ll_privacy(hdev) &&
5452 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5453 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5454 		hci_req_disable_address_resolution(hdev);
5455 }
5456 
/* HCI LE Advertising Set Terminated event.
 *
 * Sent when an extended advertising set stops, either with an error or
 * because a connection was created from it. Updates the advertising
 * instance state and, for connections, fixes up the response address
 * that could not be known at connection-complete time.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct adv_info *adv;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		/* Error status for an unknown instance: nothing to clean. */
		if (!adv)
			return;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		return;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs updating when we advertised with a
		 * random address and it has not been filled in yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			return;

		/* Instance 0 uses the controller-wide random address. */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		/* Other instances use their per-instance random address. */
		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}
}
5501 
hci_le_conn_update_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)5502 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5503 					    struct sk_buff *skb)
5504 {
5505 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5506 	struct hci_conn *conn;
5507 
5508 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5509 
5510 	if (ev->status)
5511 		return;
5512 
5513 	hci_dev_lock(hdev);
5514 
5515 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5516 	if (conn) {
5517 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5518 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5519 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5520 	}
5521 
5522 	hci_dev_unlock(hdev);
5523 }
5524 
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt and, if so, start it.
 *
 * Returns the hci_conn of the started attempt, or NULL when no attempt
 * is made (non-connectable advertising, blocked device, busy controller,
 * no matching pending connection entry, or connect failure).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* Explicit connect requests bypass the auto-connect policy check. */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5615 
/* Common processing for a single (legacy, extended, or directed) LE
 * advertising report.
 *
 * Validates and trims the AD data, resolves the advertiser's identity
 * address, optionally triggers a pending connection attempt, and emits
 * mgmt Device Found events - merging ADV_IND/ADV_SCAN_IND reports with
 * their subsequent SCAN_RSP where possible.
 *
 * @direct_addr: our own RPA from an LE Direct Advertising Report, or
 *               NULL for regular reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr;

	/* Reject anything but the five known advertising PDU types. */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5817 
hci_le_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5818 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5819 {
5820 	u8 num_reports = skb->data[0];
5821 	void *ptr = &skb->data[1];
5822 
5823 	hci_dev_lock(hdev);
5824 
5825 	while (num_reports--) {
5826 		struct hci_ev_le_advertising_info *ev = ptr;
5827 		s8 rssi;
5828 
5829 		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5830 			bt_dev_err(hdev, "Malicious advertising data.");
5831 			break;
5832 		}
5833 
5834 		if (ev->length <= HCI_MAX_AD_LENGTH &&
5835 		    ev->data + ev->length <= skb_tail_pointer(skb)) {
5836 			rssi = ev->data[ev->length];
5837 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5838 					   ev->bdaddr_type, NULL, 0, rssi,
5839 					   ev->data, ev->length, false);
5840 		} else {
5841 			bt_dev_err(hdev, "Dropping invalid advertising data");
5842 		}
5843 
5844 		ptr += sizeof(*ev) + ev->length + 1;
5845 	}
5846 
5847 	hci_dev_unlock(hdev);
5848 }
5849 
ext_evt_type_to_legacy(struct hci_dev * hdev,u16 evt_type)5850 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5851 {
5852 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5853 		switch (evt_type) {
5854 		case LE_LEGACY_ADV_IND:
5855 			return LE_ADV_IND;
5856 		case LE_LEGACY_ADV_DIRECT_IND:
5857 			return LE_ADV_DIRECT_IND;
5858 		case LE_LEGACY_ADV_SCAN_IND:
5859 			return LE_ADV_SCAN_IND;
5860 		case LE_LEGACY_NONCONN_IND:
5861 			return LE_ADV_NONCONN_IND;
5862 		case LE_LEGACY_SCAN_RSP_ADV:
5863 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5864 			return LE_ADV_SCAN_RSP;
5865 		}
5866 
5867 		goto invalid;
5868 	}
5869 
5870 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5871 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5872 			return LE_ADV_DIRECT_IND;
5873 
5874 		return LE_ADV_IND;
5875 	}
5876 
5877 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5878 		return LE_ADV_SCAN_RSP;
5879 
5880 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5881 		return LE_ADV_SCAN_IND;
5882 
5883 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5884 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5885 		return LE_ADV_NONCONN_IND;
5886 
5887 invalid:
5888 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5889 			       evt_type);
5890 
5891 	return LE_ADV_INVALID;
5892 }
5893 
hci_le_ext_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5894 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5895 {
5896 	u8 num_reports = skb->data[0];
5897 	void *ptr = &skb->data[1];
5898 
5899 	hci_dev_lock(hdev);
5900 
5901 	while (num_reports--) {
5902 		struct hci_ev_le_ext_adv_report *ev = ptr;
5903 		u8 legacy_evt_type;
5904 		u16 evt_type;
5905 
5906 		evt_type = __le16_to_cpu(ev->evt_type);
5907 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5908 		if (legacy_evt_type != LE_ADV_INVALID) {
5909 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5910 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5911 					   ev->data, ev->length,
5912 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5913 		}
5914 
5915 		ptr += sizeof(*ev) + ev->length;
5916 	}
5917 
5918 	hci_dev_unlock(hdev);
5919 }
5920 
/* HCI LE Read Remote Features Complete event.
 *
 * Stores the remote LE feature bits and, when the connection is still in
 * BT_CONFIG, completes the connection setup - including a workaround for
 * remotes that reject a peripheral-initiated features exchange.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			/* Drop the reference taken when the features
			 * exchange was started.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5962 
/* HCI LE Long Term Key Request event.
 *
 * The controller asks for the LTK to encrypt the link. Look up a matching
 * key for the peer, verify EDiv/Rand against it, and reply with either
 * the key material or a negative reply when no valid key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it up to the reply's key size. */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6026 
/* Reject a remote LE connection parameter request with the given reason. */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
6038 
hci_le_remote_conn_param_req_evt(struct hci_dev * hdev,struct sk_buff * skb)6039 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6040 					     struct sk_buff *skb)
6041 {
6042 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6043 	struct hci_cp_le_conn_param_req_reply cp;
6044 	struct hci_conn *hcon;
6045 	u16 handle, min, max, latency, timeout;
6046 
6047 	handle = le16_to_cpu(ev->handle);
6048 	min = le16_to_cpu(ev->interval_min);
6049 	max = le16_to_cpu(ev->interval_max);
6050 	latency = le16_to_cpu(ev->latency);
6051 	timeout = le16_to_cpu(ev->timeout);
6052 
6053 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6054 	if (!hcon || hcon->state != BT_CONNECTED)
6055 		return send_conn_param_neg_reply(hdev, handle,
6056 						 HCI_ERROR_UNKNOWN_CONN_ID);
6057 
6058 	if (hci_check_conn_params(min, max, latency, timeout))
6059 		return send_conn_param_neg_reply(hdev, handle,
6060 						 HCI_ERROR_INVALID_LL_PARAMS);
6061 
6062 	if (hcon->role == HCI_ROLE_MASTER) {
6063 		struct hci_conn_params *params;
6064 		u8 store_hint;
6065 
6066 		hci_dev_lock(hdev);
6067 
6068 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6069 						hcon->dst_type);
6070 		if (params) {
6071 			params->conn_min_interval = min;
6072 			params->conn_max_interval = max;
6073 			params->conn_latency = latency;
6074 			params->supervision_timeout = timeout;
6075 			store_hint = 0x01;
6076 		} else {
6077 			store_hint = 0x00;
6078 		}
6079 
6080 		hci_dev_unlock(hdev);
6081 
6082 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6083 				    store_hint, min, max, latency, timeout);
6084 	}
6085 
6086 	cp.handle = ev->handle;
6087 	cp.interval_min = ev->interval_min;
6088 	cp.interval_max = ev->interval_max;
6089 	cp.latency = ev->latency;
6090 	cp.timeout = ev->timeout;
6091 	cp.min_ce_len = 0;
6092 	cp.max_ce_len = 0;
6093 
6094 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6095 }
6096 
hci_le_direct_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)6097 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6098 					 struct sk_buff *skb)
6099 {
6100 	u8 num_reports = skb->data[0];
6101 	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6102 
6103 	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6104 		return;
6105 
6106 	hci_dev_lock(hdev);
6107 
6108 	for (; num_reports; num_reports--, ev++)
6109 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6110 				   ev->bdaddr_type, &ev->direct_addr,
6111 				   ev->direct_addr_type, ev->rssi, NULL, 0,
6112 				   false);
6113 
6114 	hci_dev_unlock(hdev);
6115 }
6116 
hci_le_phy_update_evt(struct hci_dev * hdev,struct sk_buff * skb)6117 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6118 {
6119 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6120 	struct hci_conn *conn;
6121 
6122 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6123 
6124 	if (ev->status)
6125 		return;
6126 
6127 	hci_dev_lock(hdev);
6128 
6129 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6130 	if (!conn)
6131 		goto unlock;
6132 
6133 	conn->le_tx_phy = ev->tx_phy;
6134 	conn->le_rx_phy = ev->rx_phy;
6135 
6136 unlock:
6137 	hci_dev_unlock(hdev);
6138 }
6139 
hci_le_meta_evt(struct hci_dev * hdev,struct sk_buff * skb)6140 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6141 {
6142 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
6143 
6144 	skb_pull(skb, sizeof(*le_ev));
6145 
6146 	switch (le_ev->subevent) {
6147 	case HCI_EV_LE_CONN_COMPLETE:
6148 		hci_le_conn_complete_evt(hdev, skb);
6149 		break;
6150 
6151 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6152 		hci_le_conn_update_complete_evt(hdev, skb);
6153 		break;
6154 
6155 	case HCI_EV_LE_ADVERTISING_REPORT:
6156 		hci_le_adv_report_evt(hdev, skb);
6157 		break;
6158 
6159 	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6160 		hci_le_remote_feat_complete_evt(hdev, skb);
6161 		break;
6162 
6163 	case HCI_EV_LE_LTK_REQ:
6164 		hci_le_ltk_request_evt(hdev, skb);
6165 		break;
6166 
6167 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6168 		hci_le_remote_conn_param_req_evt(hdev, skb);
6169 		break;
6170 
6171 	case HCI_EV_LE_DIRECT_ADV_REPORT:
6172 		hci_le_direct_adv_report_evt(hdev, skb);
6173 		break;
6174 
6175 	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6176 		hci_le_phy_update_evt(hdev, skb);
6177 		break;
6178 
6179 	case HCI_EV_LE_EXT_ADV_REPORT:
6180 		hci_le_ext_adv_report_evt(hdev, skb);
6181 		break;
6182 
6183 	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6184 		hci_le_enh_conn_complete_evt(hdev, skb);
6185 		break;
6186 
6187 	case HCI_EV_LE_EXT_ADV_SET_TERM:
6188 		hci_le_ext_adv_term_evt(hdev, skb);
6189 		break;
6190 
6191 	default:
6192 		break;
6193 	}
6194 }
6195 
hci_get_cmd_complete(struct hci_dev * hdev,u16 opcode,u8 event,struct sk_buff * skb)6196 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6197 				 u8 event, struct sk_buff *skb)
6198 {
6199 	struct hci_ev_cmd_complete *ev;
6200 	struct hci_event_hdr *hdr;
6201 
6202 	if (!skb)
6203 		return false;
6204 
6205 	if (skb->len < sizeof(*hdr)) {
6206 		bt_dev_err(hdev, "too short HCI event");
6207 		return false;
6208 	}
6209 
6210 	hdr = (void *) skb->data;
6211 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
6212 
6213 	if (event) {
6214 		if (hdr->evt != event)
6215 			return false;
6216 		return true;
6217 	}
6218 
6219 	/* Check if request ended in Command Status - no way to retrieve
6220 	 * any extra parameters in this case.
6221 	 */
6222 	if (hdr->evt == HCI_EV_CMD_STATUS)
6223 		return false;
6224 
6225 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6226 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6227 			   hdr->evt);
6228 		return false;
6229 	}
6230 
6231 	if (skb->len < sizeof(*ev)) {
6232 		bt_dev_err(hdev, "too short cmd_complete event");
6233 		return false;
6234 	}
6235 
6236 	ev = (void *) skb->data;
6237 	skb_pull(skb, sizeof(*ev));
6238 
6239 	if (opcode != __le16_to_cpu(ev->opcode)) {
6240 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6241 		       __le16_to_cpu(ev->opcode));
6242 		return false;
6243 	}
6244 
6245 	return true;
6246 }
6247 
hci_store_wake_reason(struct hci_dev * hdev,u8 event,struct sk_buff * skb)6248 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6249 				  struct sk_buff *skb)
6250 {
6251 	struct hci_ev_le_advertising_info *adv;
6252 	struct hci_ev_le_direct_adv_info *direct_adv;
6253 	struct hci_ev_le_ext_adv_report *ext_adv;
6254 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6255 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6256 
6257 	hci_dev_lock(hdev);
6258 
6259 	/* If we are currently suspended and this is the first BT event seen,
6260 	 * save the wake reason associated with the event.
6261 	 */
6262 	if (!hdev->suspended || hdev->wake_reason)
6263 		goto unlock;
6264 
6265 	/* Default to remote wake. Values for wake_reason are documented in the
6266 	 * Bluez mgmt api docs.
6267 	 */
6268 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6269 
6270 	/* Once configured for remote wakeup, we should only wake up for
6271 	 * reconnections. It's useful to see which device is waking us up so
6272 	 * keep track of the bdaddr of the connection event that woke us up.
6273 	 */
6274 	if (event == HCI_EV_CONN_REQUEST) {
6275 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6276 		hdev->wake_addr_type = BDADDR_BREDR;
6277 	} else if (event == HCI_EV_CONN_COMPLETE) {
6278 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6279 		hdev->wake_addr_type = BDADDR_BREDR;
6280 	} else if (event == HCI_EV_LE_META) {
6281 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
6282 		u8 subevent = le_ev->subevent;
6283 		u8 *ptr = &skb->data[sizeof(*le_ev)];
6284 		u8 num_reports = *ptr;
6285 
6286 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6287 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6288 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6289 		    num_reports) {
6290 			adv = (void *)(ptr + 1);
6291 			direct_adv = (void *)(ptr + 1);
6292 			ext_adv = (void *)(ptr + 1);
6293 
6294 			switch (subevent) {
6295 			case HCI_EV_LE_ADVERTISING_REPORT:
6296 				bacpy(&hdev->wake_addr, &adv->bdaddr);
6297 				hdev->wake_addr_type = adv->bdaddr_type;
6298 				break;
6299 			case HCI_EV_LE_DIRECT_ADV_REPORT:
6300 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6301 				hdev->wake_addr_type = direct_adv->bdaddr_type;
6302 				break;
6303 			case HCI_EV_LE_EXT_ADV_REPORT:
6304 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6305 				hdev->wake_addr_type = ext_adv->bdaddr_type;
6306 				break;
6307 			}
6308 		}
6309 	} else {
6310 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6311 	}
6312 
6313 unlock:
6314 	hci_dev_unlock(hdev);
6315 }
6316 
hci_event_packet(struct hci_dev * hdev,struct sk_buff * skb)6317 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6318 {
6319 	struct hci_event_hdr *hdr = (void *) skb->data;
6320 	hci_req_complete_t req_complete = NULL;
6321 	hci_req_complete_skb_t req_complete_skb = NULL;
6322 	struct sk_buff *orig_skb = NULL;
6323 	u8 status = 0, event = hdr->evt, req_evt = 0;
6324 	u16 opcode = HCI_OP_NOP;
6325 
6326 	if (!event) {
6327 		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6328 		goto done;
6329 	}
6330 
6331 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6332 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6333 		opcode = __le16_to_cpu(cmd_hdr->opcode);
6334 		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6335 				     &req_complete_skb);
6336 		req_evt = event;
6337 	}
6338 
6339 	/* If it looks like we might end up having to call
6340 	 * req_complete_skb, store a pristine copy of the skb since the
6341 	 * various handlers may modify the original one through
6342 	 * skb_pull() calls, etc.
6343 	 */
6344 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6345 	    event == HCI_EV_CMD_COMPLETE)
6346 		orig_skb = skb_clone(skb, GFP_KERNEL);
6347 
6348 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
6349 
6350 	/* Store wake reason if we're suspended */
6351 	hci_store_wake_reason(hdev, event, skb);
6352 
6353 	switch (event) {
6354 	case HCI_EV_INQUIRY_COMPLETE:
6355 		hci_inquiry_complete_evt(hdev, skb);
6356 		break;
6357 
6358 	case HCI_EV_INQUIRY_RESULT:
6359 		hci_inquiry_result_evt(hdev, skb);
6360 		break;
6361 
6362 	case HCI_EV_CONN_COMPLETE:
6363 		hci_conn_complete_evt(hdev, skb);
6364 		break;
6365 
6366 	case HCI_EV_CONN_REQUEST:
6367 		hci_conn_request_evt(hdev, skb);
6368 		break;
6369 
6370 	case HCI_EV_DISCONN_COMPLETE:
6371 		hci_disconn_complete_evt(hdev, skb);
6372 		break;
6373 
6374 	case HCI_EV_AUTH_COMPLETE:
6375 		hci_auth_complete_evt(hdev, skb);
6376 		break;
6377 
6378 	case HCI_EV_REMOTE_NAME:
6379 		hci_remote_name_evt(hdev, skb);
6380 		break;
6381 
6382 	case HCI_EV_ENCRYPT_CHANGE:
6383 		hci_encrypt_change_evt(hdev, skb);
6384 		break;
6385 
6386 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6387 		hci_change_link_key_complete_evt(hdev, skb);
6388 		break;
6389 
6390 	case HCI_EV_REMOTE_FEATURES:
6391 		hci_remote_features_evt(hdev, skb);
6392 		break;
6393 
6394 	case HCI_EV_CMD_COMPLETE:
6395 		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6396 				     &req_complete, &req_complete_skb);
6397 		break;
6398 
6399 	case HCI_EV_CMD_STATUS:
6400 		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6401 				   &req_complete_skb);
6402 		break;
6403 
6404 	case HCI_EV_HARDWARE_ERROR:
6405 		hci_hardware_error_evt(hdev, skb);
6406 		break;
6407 
6408 	case HCI_EV_ROLE_CHANGE:
6409 		hci_role_change_evt(hdev, skb);
6410 		break;
6411 
6412 	case HCI_EV_NUM_COMP_PKTS:
6413 		hci_num_comp_pkts_evt(hdev, skb);
6414 		break;
6415 
6416 	case HCI_EV_MODE_CHANGE:
6417 		hci_mode_change_evt(hdev, skb);
6418 		break;
6419 
6420 	case HCI_EV_PIN_CODE_REQ:
6421 		hci_pin_code_request_evt(hdev, skb);
6422 		break;
6423 
6424 	case HCI_EV_LINK_KEY_REQ:
6425 		hci_link_key_request_evt(hdev, skb);
6426 		break;
6427 
6428 	case HCI_EV_LINK_KEY_NOTIFY:
6429 		hci_link_key_notify_evt(hdev, skb);
6430 		break;
6431 
6432 	case HCI_EV_CLOCK_OFFSET:
6433 		hci_clock_offset_evt(hdev, skb);
6434 		break;
6435 
6436 	case HCI_EV_PKT_TYPE_CHANGE:
6437 		hci_pkt_type_change_evt(hdev, skb);
6438 		break;
6439 
6440 	case HCI_EV_PSCAN_REP_MODE:
6441 		hci_pscan_rep_mode_evt(hdev, skb);
6442 		break;
6443 
6444 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6445 		hci_inquiry_result_with_rssi_evt(hdev, skb);
6446 		break;
6447 
6448 	case HCI_EV_REMOTE_EXT_FEATURES:
6449 		hci_remote_ext_features_evt(hdev, skb);
6450 		break;
6451 
6452 	case HCI_EV_SYNC_CONN_COMPLETE:
6453 		hci_sync_conn_complete_evt(hdev, skb);
6454 		break;
6455 
6456 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
6457 		hci_extended_inquiry_result_evt(hdev, skb);
6458 		break;
6459 
6460 	case HCI_EV_KEY_REFRESH_COMPLETE:
6461 		hci_key_refresh_complete_evt(hdev, skb);
6462 		break;
6463 
6464 	case HCI_EV_IO_CAPA_REQUEST:
6465 		hci_io_capa_request_evt(hdev, skb);
6466 		break;
6467 
6468 	case HCI_EV_IO_CAPA_REPLY:
6469 		hci_io_capa_reply_evt(hdev, skb);
6470 		break;
6471 
6472 	case HCI_EV_USER_CONFIRM_REQUEST:
6473 		hci_user_confirm_request_evt(hdev, skb);
6474 		break;
6475 
6476 	case HCI_EV_USER_PASSKEY_REQUEST:
6477 		hci_user_passkey_request_evt(hdev, skb);
6478 		break;
6479 
6480 	case HCI_EV_USER_PASSKEY_NOTIFY:
6481 		hci_user_passkey_notify_evt(hdev, skb);
6482 		break;
6483 
6484 	case HCI_EV_KEYPRESS_NOTIFY:
6485 		hci_keypress_notify_evt(hdev, skb);
6486 		break;
6487 
6488 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
6489 		hci_simple_pair_complete_evt(hdev, skb);
6490 		break;
6491 
6492 	case HCI_EV_REMOTE_HOST_FEATURES:
6493 		hci_remote_host_features_evt(hdev, skb);
6494 		break;
6495 
6496 	case HCI_EV_LE_META:
6497 		hci_le_meta_evt(hdev, skb);
6498 		break;
6499 
6500 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6501 		hci_remote_oob_data_request_evt(hdev, skb);
6502 		break;
6503 
6504 #if IS_ENABLED(CONFIG_BT_HS)
6505 	case HCI_EV_CHANNEL_SELECTED:
6506 		hci_chan_selected_evt(hdev, skb);
6507 		break;
6508 
6509 	case HCI_EV_PHY_LINK_COMPLETE:
6510 		hci_phy_link_complete_evt(hdev, skb);
6511 		break;
6512 
6513 	case HCI_EV_LOGICAL_LINK_COMPLETE:
6514 		hci_loglink_complete_evt(hdev, skb);
6515 		break;
6516 
6517 	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6518 		hci_disconn_loglink_complete_evt(hdev, skb);
6519 		break;
6520 
6521 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6522 		hci_disconn_phylink_complete_evt(hdev, skb);
6523 		break;
6524 #endif
6525 
6526 	case HCI_EV_NUM_COMP_BLOCKS:
6527 		hci_num_comp_blocks_evt(hdev, skb);
6528 		break;
6529 
6530 	case HCI_EV_VENDOR:
6531 		msft_vendor_evt(hdev, skb);
6532 		break;
6533 
6534 	default:
6535 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
6536 		break;
6537 	}
6538 
6539 	if (req_complete) {
6540 		req_complete(hdev, status, opcode);
6541 	} else if (req_complete_skb) {
6542 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6543 			kfree_skb(orig_skb);
6544 			orig_skb = NULL;
6545 		}
6546 		req_complete_skb(hdev, status, opcode, orig_skb);
6547 	}
6548 
6549 done:
6550 	kfree_skb(orig_skb);
6551 	kfree_skb(skb);
6552 	hdev->stat.evt_rx++;
6553 }
6554