• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36 
37 /* Handle HCI Event packets */
38 
/* Command Complete for HCI_Inquiry_Cancel: clear inquiry state, wake any
 * waiters blocked on HCI_INQUIRY, and resume deferred connection attempts.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The barrier must sit between clearing the bit and waking the
	 * waiters — wake_up_bit() does not imply one itself.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Inquiry blocked new connections; retry any that queued up */
	hci_conn_check_pending(hdev);
}
58 
hci_cc_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 	__u8 status = *((__u8 *) skb->data);
62 
63 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64 
65 	if (status)
66 		return;
67 
68 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70 
hci_cc_exit_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 	__u8 status = *((__u8 *) skb->data);
74 
75 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76 
77 	if (status)
78 		return;
79 
80 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
/* Command Complete for HCI_Remote_Name_Request_Cancel: nothing to update
 * on the host side — the outcome is handled via the Remote Name Request
 * Complete event — so this handler only logs.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90 
hci_cc_role_discovery(struct hci_dev * hdev,struct sk_buff * skb)91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 	struct hci_rp_role_discovery *rp = (void *) skb->data;
94 	struct hci_conn *conn;
95 
96 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97 
98 	if (rp->status)
99 		return;
100 
101 	hci_dev_lock(hdev);
102 
103 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 	if (conn)
105 		conn->role = rp->role;
106 
107 	hci_dev_unlock(hdev);
108 }
109 
hci_cc_read_link_policy(struct hci_dev * hdev,struct sk_buff * skb)110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 {
112 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 	struct hci_conn *conn;
114 
115 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116 
117 	if (rp->status)
118 		return;
119 
120 	hci_dev_lock(hdev);
121 
122 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 	if (conn)
124 		conn->link_policy = __le16_to_cpu(rp->policy);
125 
126 	hci_dev_unlock(hdev);
127 }
128 
/* Command Complete for HCI_Write_Link_Policy_Settings.
 *
 * The event only carries status + handle; the policy value that was
 * written is recovered from the command we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit handle at the start of the
		 * command payload to reach the policy field — assumes the
		 * {handle, policy} layout of hci_cp_write_link_policy.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152 
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 					struct sk_buff *skb)
155 {
156 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 
158 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159 
160 	if (rp->status)
161 		return;
162 
163 	hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165 
hci_cc_write_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 					 struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 
174 	if (status)
175 		return;
176 
177 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 	if (!sent)
179 		return;
180 
181 	hdev->link_policy = get_unaligned_le16(sent);
182 }
183 
/* Command Complete for HCI_Reset: the controller is back to its power-on
 * defaults, so drop all host-side state that mirrored it.
 *
 * NOTE(review): state is cleared regardless of status — presumably a
 * failed reset still terminates the HCI_RESET request; confirm.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached LE advertising / scan response payloads are now stale */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Drop our mirror of the controller's LE white list */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
211 
/* Command Complete for HCI_Write_Local_Name.
 *
 * The event only carries a status; the name that was written is recovered
 * from the sent command data.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* With mgmt in control, report both success and failure to it;
	 * otherwise only cache the name locally on success.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
232 
hci_cc_read_local_name(struct hci_dev * hdev,struct sk_buff * skb)233 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
234 {
235 	struct hci_rp_read_local_name *rp = (void *) skb->data;
236 
237 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
238 
239 	if (rp->status)
240 		return;
241 
242 	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
243 	    test_bit(HCI_CONFIG, &hdev->dev_flags))
244 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
245 }
246 
/* Command Complete for HCI_Write_Authentication_Enable: mirror the new
 * setting into HCI_AUTH and notify mgmt of the outcome.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* The value written is the single byte of the command */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt hears about failures too, so it can fail its pending cmd */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
270 
hci_cc_write_encrypt_mode(struct hci_dev * hdev,struct sk_buff * skb)271 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
272 {
273 	__u8 status = *((__u8 *) skb->data);
274 	__u8 param;
275 	void *sent;
276 
277 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
278 
279 	if (status)
280 		return;
281 
282 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
283 	if (!sent)
284 		return;
285 
286 	param = *((__u8 *) sent);
287 
288 	if (param)
289 		set_bit(HCI_ENCRYPT, &hdev->flags);
290 	else
291 		clear_bit(HCI_ENCRYPT, &hdev->flags);
292 }
293 
/* Command Complete for HCI_Write_Scan_Enable: update the ISCAN/PSCAN
 * flags to reflect the scan mode that was requested.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* Scan mode byte from the command we sent */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	/* On failure, cancel any discoverable timeout but leave the
	 * ISCAN/PSCAN flags untouched — the controller didn't change.
	 */
	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
328 
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte
 * Class of Device value reported by the controller.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* CoD is little-endian on the wire; print most significant first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
343 
/* Command Complete for HCI_Write_Class_of_Device: cache the class we
 * wrote (taken from the sent command) and notify mgmt of the outcome.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Only cache on success; the 3 bytes are the CoD from the command */
	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	/* mgmt sees both success and failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
365 
hci_cc_read_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)366 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
367 {
368 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
369 	__u16 setting;
370 
371 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
372 
373 	if (rp->status)
374 		return;
375 
376 	setting = __le16_to_cpu(rp->voice_setting);
377 
378 	if (hdev->voice_setting == setting)
379 		return;
380 
381 	hdev->voice_setting = setting;
382 
383 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
384 
385 	if (hdev->notify)
386 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
387 }
388 
hci_cc_write_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)389 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
390 				       struct sk_buff *skb)
391 {
392 	__u8 status = *((__u8 *) skb->data);
393 	__u16 setting;
394 	void *sent;
395 
396 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
397 
398 	if (status)
399 		return;
400 
401 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
402 	if (!sent)
403 		return;
404 
405 	setting = get_unaligned_le16(sent);
406 
407 	if (hdev->voice_setting == setting)
408 		return;
409 
410 	hdev->voice_setting = setting;
411 
412 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
413 
414 	if (hdev->notify)
415 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
416 }
417 
hci_cc_read_num_supported_iac(struct hci_dev * hdev,struct sk_buff * skb)418 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
419 					  struct sk_buff *skb)
420 {
421 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
422 
423 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
424 
425 	if (rp->status)
426 		return;
427 
428 	hdev->num_iac = rp->num_iac;
429 
430 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
431 }
432 
/* Command Complete for HCI_Write_Simple_Pairing_Mode: mirror the host
 * SSP feature bit and the HCI_SSP_ENABLED flag, and inform mgmt.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	/* Keep the cached host features page in sync with what we wrote */
	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	/* With mgmt active it owns the flag; otherwise update it directly */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
460 
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: mirror
 * the host SC feature bit and the HCI_SC_ENABLED flag, and inform mgmt.
 * Structured identically to hci_cc_write_ssp_mode() above.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	/* Keep the cached host features page in sync with what we wrote */
	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* With mgmt active it owns the flag; otherwise update it directly */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
488 
hci_cc_read_local_version(struct hci_dev * hdev,struct sk_buff * skb)489 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
490 {
491 	struct hci_rp_read_local_version *rp = (void *) skb->data;
492 
493 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
494 
495 	if (rp->status)
496 		return;
497 
498 	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
499 	    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
500 		hdev->hci_ver = rp->hci_ver;
501 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
502 		hdev->lmp_ver = rp->lmp_ver;
503 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
504 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
505 	}
506 }
507 
hci_cc_read_local_commands(struct hci_dev * hdev,struct sk_buff * skb)508 static void hci_cc_read_local_commands(struct hci_dev *hdev,
509 				       struct sk_buff *skb)
510 {
511 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
512 
513 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
514 
515 	if (rp->status)
516 		return;
517 
518 	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
519 	    test_bit(HCI_CONFIG, &hdev->dev_flags))
520 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521 }
522 
/* Command Complete for HCI_Read_Local_Supported_Features: cache feature
 * page 0 and widen the default ACL/SCO/eSCO packet type masks according
 * to what the controller supports.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types beyond the baseline HV1 */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types (2 Mb/s, 3 Mb/s, 3-slot) */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
572 
hci_cc_read_local_ext_features(struct hci_dev * hdev,struct sk_buff * skb)573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574 					   struct sk_buff *skb)
575 {
576 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577 
578 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579 
580 	if (rp->status)
581 		return;
582 
583 	if (hdev->max_page < rp->max_page)
584 		hdev->max_page = rp->max_page;
585 
586 	if (rp->page < HCI_MAX_PAGES)
587 		memcpy(hdev->features[rp->page], rp->features, 8);
588 }
589 
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 					  struct sk_buff *skb)
592 {
593 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594 
595 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 
597 	if (rp->status)
598 		return;
599 
600 	hdev->flow_ctl_mode = rp->mode;
601 }
602 
/* Command Complete for HCI_Read_Buffer_Size: record the controller's ACL
 * and SCO buffer geometry and initialise the outstanding-packet counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer sizes; override with
	 * known-good values when the quirk is set.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Flow control credits start at the full buffer count */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
628 
hci_cc_read_bd_addr(struct hci_dev * hdev,struct sk_buff * skb)629 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
630 {
631 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
632 
633 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
634 
635 	if (rp->status)
636 		return;
637 
638 	if (test_bit(HCI_INIT, &hdev->flags))
639 		bacpy(&hdev->bdaddr, &rp->bdaddr);
640 
641 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
642 		bacpy(&hdev->setup_addr, &rp->bdaddr);
643 }
644 
hci_cc_read_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 					   struct sk_buff *skb)
647 {
648 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649 
650 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 
652 	if (rp->status)
653 		return;
654 
655 	if (test_bit(HCI_INIT, &hdev->flags)) {
656 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 		hdev->page_scan_window = __le16_to_cpu(rp->window);
658 	}
659 }
660 
hci_cc_write_page_scan_activity(struct hci_dev * hdev,struct sk_buff * skb)661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 					    struct sk_buff *skb)
663 {
664 	u8 status = *((u8 *) skb->data);
665 	struct hci_cp_write_page_scan_activity *sent;
666 
667 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
668 
669 	if (status)
670 		return;
671 
672 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 	if (!sent)
674 		return;
675 
676 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 	hdev->page_scan_window = __le16_to_cpu(sent->window);
678 }
679 
hci_cc_read_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 					   struct sk_buff *skb)
682 {
683 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684 
685 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686 
687 	if (rp->status)
688 		return;
689 
690 	if (test_bit(HCI_INIT, &hdev->flags))
691 		hdev->page_scan_type = rp->type;
692 }
693 
hci_cc_write_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 					struct sk_buff *skb)
696 {
697 	u8 status = *((u8 *) skb->data);
698 	u8 *type;
699 
700 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
701 
702 	if (status)
703 		return;
704 
705 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 	if (type)
707 		hdev->page_scan_type = *type;
708 }
709 
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow
 * control): record the block geometry and initialise the credit count.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks are free at this point */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
729 
/* Command Complete for HCI_Read_Clock: store either the local clock
 * (cp->which == 0x00) or the piconet clock of the connection identified
 * by the returned handle.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The "which clock" selector only exists in the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
764 
/* Command Complete for HCI_Read_Local_AMP_Info: cache the AMP controller
 * capabilities and always answer the pending A2MP Get Info request —
 * even on error status, so the peer is not left waiting.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
789 
hci_cc_read_local_amp_assoc(struct hci_dev * hdev,struct sk_buff * skb)790 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
791 					struct sk_buff *skb)
792 {
793 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
794 	struct amp_assoc *assoc = &hdev->loc_assoc;
795 	size_t rem_len, frag_len;
796 
797 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 
799 	if (rp->status)
800 		goto a2mp_rsp;
801 
802 	frag_len = skb->len - sizeof(*rp);
803 	rem_len = __le16_to_cpu(rp->rem_len);
804 
805 	if (rem_len > frag_len) {
806 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
807 
808 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
809 		assoc->offset += frag_len;
810 
811 		/* Read other fragments */
812 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
813 
814 		return;
815 	}
816 
817 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
818 	assoc->len = assoc->offset + rem_len;
819 	assoc->offset = 0;
820 
821 a2mp_rsp:
822 	/* Send A2MP Rsp when all fragments are received */
823 	a2mp_send_getampassoc_rsp(hdev, rp->status);
824 	a2mp_send_create_phy_link_req(hdev, rp->status);
825 }
826 
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 					 struct sk_buff *skb)
829 {
830 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831 
832 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833 
834 	if (rp->status)
835 		return;
836 
837 	hdev->inq_tx_power = rp->tx_power;
838 }
839 
/* Command Complete for HCI_PIN_Code_Request_Reply: notify mgmt of the
 * outcome, and on success remember the PIN length on the connection
 * (needed later for link-key type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt hears about both success and failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length only exists in the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
867 
hci_cc_pin_code_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)868 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
871 
872 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
873 
874 	hci_dev_lock(hdev);
875 
876 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
877 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
878 						 rp->status);
879 
880 	hci_dev_unlock(hdev);
881 }
882 
hci_cc_le_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)883 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
884 				       struct sk_buff *skb)
885 {
886 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
887 
888 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889 
890 	if (rp->status)
891 		return;
892 
893 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894 	hdev->le_pkts = rp->le_max_pkt;
895 
896 	hdev->le_cnt = hdev->le_pkts;
897 
898 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
899 }
900 
hci_cc_le_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 					  struct sk_buff *skb)
903 {
904 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905 
906 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907 
908 	if (rp->status)
909 		return;
910 
911 	memcpy(hdev->le_features, rp->features, 8);
912 }
913 
hci_cc_le_read_adv_tx_power(struct hci_dev * hdev,struct sk_buff * skb)914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 					struct sk_buff *skb)
916 {
917 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918 
919 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920 
921 	if (rp->status)
922 		return;
923 
924 	hdev->adv_tx_power = rp->tx_power;
925 }
926 
hci_cc_user_confirm_reply(struct hci_dev * hdev,struct sk_buff * skb)927 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
928 {
929 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
930 
931 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
932 
933 	hci_dev_lock(hdev);
934 
935 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
936 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
937 						 rp->status);
938 
939 	hci_dev_unlock(hdev);
940 }
941 
hci_cc_user_confirm_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)942 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
943 					  struct sk_buff *skb)
944 {
945 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
946 
947 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948 
949 	hci_dev_lock(hdev);
950 
951 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
953 						     ACL_LINK, 0, rp->status);
954 
955 	hci_dev_unlock(hdev);
956 }
957 
hci_cc_user_passkey_reply(struct hci_dev * hdev,struct sk_buff * skb)958 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
959 {
960 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
961 
962 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
963 
964 	hci_dev_lock(hdev);
965 
966 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
967 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
968 						 0, rp->status);
969 
970 	hci_dev_unlock(hdev);
971 }
972 
hci_cc_user_passkey_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)973 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
974 					  struct sk_buff *skb)
975 {
976 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
977 
978 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
979 
980 	hci_dev_lock(hdev);
981 
982 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
983 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
984 						     ACL_LINK, 0, rp->status);
985 
986 	hci_dev_unlock(hdev);
987 }
988 
hci_cc_read_local_oob_data(struct hci_dev * hdev,struct sk_buff * skb)989 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
990 				       struct sk_buff *skb)
991 {
992 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
993 
994 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
995 
996 	hci_dev_lock(hdev);
997 	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
998 					  NULL, NULL, rp->status);
999 	hci_dev_unlock(hdev);
1000 }
1001 
hci_cc_read_local_oob_ext_data(struct hci_dev * hdev,struct sk_buff * skb)1002 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1003 					   struct sk_buff *skb)
1004 {
1005 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1006 
1007 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1008 
1009 	hci_dev_lock(hdev);
1010 	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1011 					  rp->hash256, rp->randomizer256,
1012 					  rp->status);
1013 	hci_dev_unlock(hdev);
1014 }
1015 
1016 
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The complete event does not echo the address; recover it
	 * from the command that was sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Cache the random address now programmed into the controller */
	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1037 
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the requested enable/disable value from the command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		/* An LE connection in BT_CONNECT state means we are
		 * advertising in order to be connected to; arm its
		 * connection timeout work.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1072 
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The scan type is not echoed in the event; take it from the
	 * command that was sent.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remember whether active or passive scanning is configured */
	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1093 
has_pending_adv_report(struct hci_dev * hdev)1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 {
1096 	struct discovery_state *d = &hdev->discovery;
1097 
1098 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099 }
1100 
clear_pending_adv_report(struct hci_dev * hdev)1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 	struct discovery_state *d = &hdev->discovery;
1104 
1105 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 	d->last_adv_data_len = 0;
1107 }
1108 
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Remember the most recent advertising report in the discovery
	 * state so it can be reported later (all fields overwritten).
	 */
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	d->last_adv_data_len = len;
	memcpy(d->last_adv_data, data, len);
	bacpy(&d->last_adv_addr, bdaddr);
}
1122 
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the requested enable value from the sent command */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Drop any stale pending report from a previous scan */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Flush the last stored advertising report to
			 * userspace before scanning stops.
			 */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		/* Values other than ENABLE/DISABLE are reserved by the spec */
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1188 
hci_cc_le_read_white_list_size(struct hci_dev * hdev,struct sk_buff * skb)1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 					   struct sk_buff *skb)
1191 {
1192 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193 
1194 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195 
1196 	if (rp->status)
1197 		return;
1198 
1199 	hdev->le_white_list_size = rp->size;
1200 }
1201 
hci_cc_le_clear_white_list(struct hci_dev * hdev,struct sk_buff * skb)1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 				       struct sk_buff *skb)
1204 {
1205 	__u8 status = *((__u8 *) skb->data);
1206 
1207 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208 
1209 	if (status)
1210 		return;
1211 
1212 	hci_bdaddr_list_clear(&hdev->le_white_list);
1213 }
1214 
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The event carries only a status; the added address comes
	 * from the command that was sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	/* Mirror the controller's white list in our cached copy */
	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
}
1233 
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The event carries only a status; the removed address comes
	 * from the command that was sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	/* Mirror the controller's white list in our cached copy */
	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1252 
hci_cc_le_read_supported_states(struct hci_dev * hdev,struct sk_buff * skb)1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 					    struct sk_buff *skb)
1255 {
1256 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257 
1258 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259 
1260 	if (rp->status)
1261 		return;
1262 
1263 	memcpy(hdev->le_states, rp->le_states, 8);
1264 }
1265 
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the written values from the command that was sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	/* Mirror the host feature bits we just wrote into the cached
	 * features page (page 1) and the corresponding dev_flags.
	 */
	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* With LE disabled, LE advertising cannot stay on either */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	/* Simultaneous LE and BR/EDR host support bit */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1295 
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The own-address type is not echoed in the event; take it
	 * from the command that was sent.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	/* Remember which own-address type advertising now uses */
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1314 
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	/* Let the AMP layer continue writing remote assoc data for
	 * this physical link.
	 */
	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1328 
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Cache the reported RSSI on the matching connection, if it
	 * still exists (it may have been torn down meanwhile).
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1347 
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The requested power type is only in the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	/* Per the HCI Read_Transmit_Power_Level command, type 0x00 is
	 * the current level and 0x01 the maximum level.
	 */
	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1381 
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status) {
		/* Inquiry is now running on the controller */
		set_bit(HCI_INQUIRY, &hdev->flags);
		return;
	}

	/* Inquiry failed to start; kick any pending connection attempts */
	hci_conn_check_pending(hdev);
}
1393 
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed): keep the
			 * connection around for a retry (at most two
			 * attempts); any other error tears it down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a connection object exists
		 * to receive the later Connection Complete event.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1431 
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Only a failed Add SCO needs cleanup here */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	/* The handle in the command is the ACL link; the SCO channel
	 * being set up hangs off it as acl->link.
	 */
	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (!acl)
		goto unlock;

	sco = acl->link;
	if (!sco)
		goto unlock;

	sco->state = BT_CLOSED;

	hci_proto_connect_cfm(sco, status);
	hci_conn_del(sco);

unlock:
	hci_dev_unlock(hdev);
}
1466 
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* If authentication failed while the link was still being
	 * configured, report the failure up and drop the setup
	 * reference on the connection.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_proto_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1493 
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* If enabling encryption failed while the link was still being
	 * configured, report the failure up and drop the setup
	 * reference on the connection.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_proto_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1520 
hci_outgoing_auth_needed(struct hci_dev * hdev,struct hci_conn * conn)1521 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1522 				    struct hci_conn *conn)
1523 {
1524 	if (conn->state != BT_CONFIG || !conn->out)
1525 		return 0;
1526 
1527 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1528 		return 0;
1529 
1530 	/* Only request authentication for SSP connections or non-SSP
1531 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1532 	 * is requested.
1533 	 */
1534 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1535 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1536 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1537 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1538 		return 0;
1539 
1540 	return 1;
1541 }
1542 
hci_resolve_name(struct hci_dev * hdev,struct inquiry_entry * e)1543 static int hci_resolve_name(struct hci_dev *hdev,
1544 				   struct inquiry_entry *e)
1545 {
1546 	struct hci_cp_remote_name_req cp;
1547 
1548 	memset(&cp, 0, sizeof(cp));
1549 
1550 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1551 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1552 	cp.pscan_mode = e->data.pscan_mode;
1553 	cp.clock_offset = e->data.clock_offset;
1554 
1555 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1556 }
1557 
hci_resolve_next_name(struct hci_dev * hdev)1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1559 {
1560 	struct discovery_state *discov = &hdev->discovery;
1561 	struct inquiry_entry *e;
1562 
1563 	if (list_empty(&discov->resolve))
1564 		return false;
1565 
1566 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1567 	if (!e)
1568 		return false;
1569 
1570 	if (hci_resolve_name(hdev, e) == 0) {
1571 		e->name_state = NAME_PENDING;
1572 		return true;
1573 	}
1574 
1575 	return false;
1576 }
1577 
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First notification of this connection to mgmt happens here
	 * when the name arrives before the usual connected event path.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* NULL name means the request failed for this device */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep resolving; discovery only completes when no entry is left */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1620 
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Name resolution failed: report a NULL name so discovery
	 * bookkeeping can move on to the next device.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication ourselves since the name request
	 * it would normally follow has failed.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1663 
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* A failure during link configuration is reported up and the
	 * setup reference on the connection is released.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_proto_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1690 
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* A failure during link configuration is reported up and the
	 * setup reference on the connection is released.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG) {
		hci_proto_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
1717 
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Only a failed Setup Synchronous Connection needs cleanup */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	/* The handle in the command is the ACL link; the SCO channel
	 * being set up hangs off it as acl->link.
	 */
	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (!acl)
		goto unlock;

	sco = acl->link;
	if (!sco)
		goto unlock;

	sco->state = BT_CLOSED;

	hci_proto_connect_cfm(sco, status);
	hci_conn_del(sco);

unlock:
	hci_dev_unlock(hdev);
}
1752 
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to do unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* The mode change is no longer pending */
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	/* A SCO setup was deferred behind this mode change; run it now */
	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
1779 
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to do unless the command failed */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* The mode change is no longer pending */
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	/* A SCO setup was deferred behind this mode change; run it now */
	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

unlock:
	hci_dev_unlock(hdev);
}
1806 
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	/* Only a failed disconnect needs action here: userspace must
	 * be told that the connection is in fact still up.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
1828 
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		/* Creation failed: remove the connection object that
		 * was created for this AMP physical link.
		 */
		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		/* Continue AMP setup by writing the remote assoc data */
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1853 
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	/* Physical link accepted; continue AMP setup by writing the
	 * remote assoc data.
	 */
	amp_write_remote_assoc(hdev, cp->phy_handle);
}
1869 
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1920 
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Only command failures need handling here; on success the
	 * outcome is signalled by a separate event (handled elsewhere).
	 */
	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Encryption could not be started; drop the link rather than
	 * leaving it up unencrypted.
	 */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1950 
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Wake anyone waiting on HCI_INQUIRY; the barrier pairs with
	 * the waiter as advised by wake_up_bit.
	 */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine below only applies to mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No names to resolve: discovery is done */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Otherwise move on to the name-resolving phase, starting with
	 * the first cache entry that still needs a name.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1991 
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)1992 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1993 {
1994 	struct inquiry_data data;
1995 	struct inquiry_info *info = (void *) (skb->data + 1);
1996 	int num_rsp = *((__u8 *) skb->data);
1997 
1998 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1999 
2000 	if (!num_rsp)
2001 		return;
2002 
2003 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2004 		return;
2005 
2006 	hci_dev_lock(hdev);
2007 
2008 	for (; num_rsp; num_rsp--, info++) {
2009 		u32 flags;
2010 
2011 		bacpy(&data.bdaddr, &info->bdaddr);
2012 		data.pscan_rep_mode	= info->pscan_rep_mode;
2013 		data.pscan_period_mode	= info->pscan_period_mode;
2014 		data.pscan_mode		= info->pscan_mode;
2015 		memcpy(data.dev_class, info->dev_class, 3);
2016 		data.clock_offset	= info->clock_offset;
2017 		data.rssi		= 0x00;
2018 		data.ssp_mode		= 0x00;
2019 
2020 		flags = hci_inquiry_cache_update(hdev, &data, false);
2021 
2022 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2023 				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
2024 	}
2025 
2026 	hci_dev_unlock(hdev);
2027 }
2028 
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection we set up
		 * as ESCO; retry the lookup under that type and fix up
		 * the connection's type to what was actually created.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a
			 * stored link key get the shorter pairing
			 * timeout for the disconnect timer.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		/* Inherit device-wide auth/encrypt policy on this link */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Failed connections are confirmed to the upper layer
		 * and torn down here.
		 */
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Successful ACL links are confirmed later (after remote
		 * feature exchange); non-ACL links are confirmed now.
		 */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2112 
/* Send HCI_Reject_Connection_Request for @bdaddr, using the
 * "unacceptable BD_ADDR" error as the rejection reason.
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	bacpy(&cp.bdaddr, bdaddr);

	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2121 
/* Handle HCI Connection Request event: an incoming ACL, SCO or eSCO
 * connection attempt from a remote device.
 *
 * The request is rejected when no protocol layer accepts the link type,
 * when the peer is blacklisted, or when mgmt policy (connectable state /
 * whitelist) forbids it.  Otherwise the connection is accepted, either
 * immediately or — when HCI_PROTO_DEFER is requested — deferred so the
 * protocol layer can decide later.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers (L2CAP/SCO) vote on accepting this
	 * link type; they may also ask for deferred setup via @flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event data */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	/* Reuse an existing connection object if one exists for this
	 * peer/link type; otherwise allocate one in the slave role.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		/* Request a role switch to master when the controller
		 * supports it and our link policy asks for it.
		 */
		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable controller: accept the synchronous
		 * connection with default bandwidth/latency settings.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred setup: notify the protocol layer and wait
		 * for its accept/reject decision.
		 */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2216 
/* Translate an HCI disconnect reason code into the corresponding mgmt
 * disconnect reason reported to userspace; unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
static u8 hci_to_mgmt_reason(u8 err)
{
	if (err == HCI_ERROR_CONNECTION_TIMEOUT)
		return MGMT_DEV_DISCONN_TIMEOUT;

	if (err == HCI_ERROR_REMOTE_USER_TERM ||
	    err == HCI_ERROR_REMOTE_LOW_RESOURCES ||
	    err == HCI_ERROR_REMOTE_POWER_OFF)
		return MGMT_DEV_DISCONN_REMOTE;

	if (err == HCI_ERROR_LOCAL_HOST_TERM)
		return MGMT_DEV_DISCONN_LOCAL_HOST;

	return MGMT_DEV_DISCONN_UNKNOWN;
}
2232 
/* Handle HCI Disconnection Complete event.
 *
 * Reports the disconnect (or disconnect failure) to mgmt, removes the
 * link key when it was flagged non-persistent, re-arms LE auto-connect
 * for devices configured for it, tears down the connection object and
 * re-enables LE advertising when an LE link went away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connection itself
	 * had been reported as a mgmt-visible connection.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop the key now if it was marked for removal on
		 * disconnect.
		 */
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Re-queue an LE auto-connection when the device is configured
	 * for it — either unconditionally, or only after a link loss.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* hci_conn_del() frees conn below, so save the link type */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2310 
/* Handle HCI Authentication Complete event.
 *
 * On success, marks the connection authenticated and promotes the
 * security level; on failure, notifies mgmt.  Then continues the
 * connection state machine: during BT_CONFIG with SSP, authentication
 * success triggers link encryption, otherwise the connection is
 * confirmed to the protocol layers.  A pending encryption request is
 * also serviced here.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated
		 * on a live link; leave the auth state untouched.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the connection
			 * setup is considered complete.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the hold with a drop after refreshing the
		 * disconnect timeout.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service a queued encryption request now that authentication
	 * has finished.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2375 
/* Handle HCI Remote Name Request Complete event.
 *
 * Resolves any pending mgmt name-request for the device, then — when an
 * ACL connection to the peer exists and outgoing authentication is still
 * needed — kicks off HCI_Authentication_Requested.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	/* May be NULL; the mgmt name handling below copes with that,
	 * and the auth path bails out explicitly.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* On failure report an empty name so the pending request is
	 * still completed.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication only if it isn't already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2417 
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags according to
 * the new encryption state, handles encryption failure (disconnect or
 * RPA refresh for LE), enforces Secure Connections Only policy during
 * connection setup, and confirms the result to the protocol layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Encryption failure on an established link: terminate it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2489 
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2490 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2491 					     struct sk_buff *skb)
2492 {
2493 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2494 	struct hci_conn *conn;
2495 
2496 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2497 
2498 	hci_dev_lock(hdev);
2499 
2500 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2501 	if (conn) {
2502 		if (!ev->status)
2503 			set_bit(HCI_CONN_SECURE, &conn->flags);
2504 
2505 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2506 
2507 		hci_key_change_cfm(conn, ev->status);
2508 	}
2509 
2510 	hci_dev_unlock(hdev);
2511 }
2512 
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page, then continues connection setup:
 * fetch extended features when both sides support SSP, otherwise
 * request the remote name (for mgmt) or report the device as connected,
 * and finally complete the connection when no outgoing authentication
 * is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only act during connection setup */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: read extended features page 1 before
	 * finishing setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the connection to
	 * mgmt; if already reported, just mark it connected.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2562 
/* Handle HCI Command Complete event: dispatch to the per-opcode
 * completion handler, cancel the command timeout, complete any request
 * waiting on this opcode, and restart the command queue if the
 * controller signalled room for more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status.
	 * NOTE(review): assumes at least one byte of return parameters
	 * follows the event header — TODO confirm for all opcodes.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the command timeout can be
	 * cancelled (HCI_OP_NOP completions are controller-generated).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept another command:
	 * release the command credit and kick the command work queue,
	 * unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2842 
/* Handle HCI Command Status event: dispatch to the per-opcode status
 * handler, cancel the command timeout, complete a waiting request when
 * appropriate, and restart the command queue if the controller can take
 * more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; cancel the command timeout
	 * (HCI_OP_NOP statuses are controller-generated).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the pending request now if the command failed, or if
	 * it is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0: release the command credit and kick the command
	 * work queue, unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2935 
hci_role_change_evt(struct hci_dev * hdev,struct sk_buff * skb)2936 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2937 {
2938 	struct hci_ev_role_change *ev = (void *) skb->data;
2939 	struct hci_conn *conn;
2940 
2941 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2942 
2943 	hci_dev_lock(hdev);
2944 
2945 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2946 	if (conn) {
2947 		if (!ev->status)
2948 			conn->role = ev->role;
2949 
2950 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2951 
2952 		hci_role_switch_cfm(conn, ev->status, ev->role);
2953 	}
2954 
2955 	hci_dev_unlock(hdev);
2956 }
2957 
/* Handle HCI Number Of Completed Packets event (packet-based flow
 * control).
 *
 * For each reported handle, credits the completed packets back to the
 * per-connection sent counter and the per-device quota for the link
 * type, clamping to the controller-advertised maximum, then kicks the
 * TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb actually contains num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* New credits may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3023 
/* Resolve a handle to its hci_conn.  On BR/EDR controllers the handle
 * names a connection directly; on AMP controllers it names a logical
 * channel, so the channel's owning connection is returned.  Returns
 * NULL when nothing matches.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	if (hdev->dev_type == HCI_BREDR)
		return hci_conn_hash_lookup_handle(hdev, handle);

	if (hdev->dev_type == HCI_AMP) {
		chan = hci_chan_lookup_handle(hdev, handle);
		return chan ? chan->conn : NULL;
	}

	BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);

	return NULL;
}
3044 
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).
 *
 * For each reported handle, credits the freed blocks back to the
 * per-connection sent counter and the shared block quota, clamping to
 * the controller-advertised maximum, then kicks the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb actually contains num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may name a connection (BR/EDR) or a logical
		 * channel (AMP), depending on controller type.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* New credits may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3094 
hci_mode_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3095 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3096 {
3097 	struct hci_ev_mode_change *ev = (void *) skb->data;
3098 	struct hci_conn *conn;
3099 
3100 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3101 
3102 	hci_dev_lock(hdev);
3103 
3104 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3105 	if (conn) {
3106 		conn->mode = ev->mode;
3107 
3108 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3109 					&conn->flags)) {
3110 			if (conn->mode == HCI_CM_ACTIVE)
3111 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3112 			else
3113 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3114 		}
3115 
3116 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3117 			hci_sco_setup(conn, ev->status);
3118 	}
3119 
3120 	hci_dev_unlock(hdev);
3121 }
3122 
/* Handle HCI PIN Code Request event.
 *
 * Extends the connection's disconnect timeout for pairing, rejects the
 * request when the device is not bondable and we did not initiate
 * authentication, or forwards the request to mgmt/userspace.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive longer while pairing is ongoing */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Non-bondable and the peer initiated authentication: refuse */
	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3160 
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the connection's pending
 * security level (unauthenticated key for an authenticated request, or
 * a short-PIN combination key for high/FIPS security), in which case a
 * negative reply is sent so a new pairing can take place.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is managed through mgmt only */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a request for
		 * authenticated (MITM-protected) security.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key derived from a short PIN is not
		 * strong enough for high/FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3219 
/* Handle the HCI Link Key Notification event: a new link key has been
 * created for a connection. Store the key, notify user space and decide
 * whether the key may persist past the connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Refresh the disconnect timer now that pairing finished */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* hci_add_link_key also decides persistence (via &persistent) */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		/* Non-persistent keys must be flushed on disconnect */
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3273 
hci_clock_offset_evt(struct hci_dev * hdev,struct sk_buff * skb)3274 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3275 {
3276 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3277 	struct hci_conn *conn;
3278 
3279 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3280 
3281 	hci_dev_lock(hdev);
3282 
3283 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3284 	if (conn && !ev->status) {
3285 		struct inquiry_entry *ie;
3286 
3287 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3288 		if (ie) {
3289 			ie->data.clock_offset = ev->clock_offset;
3290 			ie->timestamp = jiffies;
3291 		}
3292 	}
3293 
3294 	hci_dev_unlock(hdev);
3295 }
3296 
hci_pkt_type_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3297 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3298 {
3299 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3300 	struct hci_conn *conn;
3301 
3302 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3303 
3304 	hci_dev_lock(hdev);
3305 
3306 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3307 	if (conn && !ev->status)
3308 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3309 
3310 	hci_dev_unlock(hdev);
3311 }
3312 
hci_pscan_rep_mode_evt(struct hci_dev * hdev,struct sk_buff * skb)3313 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3314 {
3315 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3316 	struct inquiry_entry *ie;
3317 
3318 	BT_DBG("%s", hdev->name);
3319 
3320 	hci_dev_lock(hdev);
3321 
3322 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3323 	if (ie) {
3324 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3325 		ie->timestamp = jiffies;
3326 	}
3327 
3328 	hci_dev_unlock(hdev);
3329 }
3330 
hci_inquiry_result_with_rssi_evt(struct hci_dev * hdev,struct sk_buff * skb)3331 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3332 					     struct sk_buff *skb)
3333 {
3334 	struct inquiry_data data;
3335 	int num_rsp = *((__u8 *) skb->data);
3336 
3337 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3338 
3339 	if (!num_rsp)
3340 		return;
3341 
3342 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3343 		return;
3344 
3345 	hci_dev_lock(hdev);
3346 
3347 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3348 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3349 		info = (void *) (skb->data + 1);
3350 
3351 		for (; num_rsp; num_rsp--, info++) {
3352 			u32 flags;
3353 
3354 			bacpy(&data.bdaddr, &info->bdaddr);
3355 			data.pscan_rep_mode	= info->pscan_rep_mode;
3356 			data.pscan_period_mode	= info->pscan_period_mode;
3357 			data.pscan_mode		= info->pscan_mode;
3358 			memcpy(data.dev_class, info->dev_class, 3);
3359 			data.clock_offset	= info->clock_offset;
3360 			data.rssi		= info->rssi;
3361 			data.ssp_mode		= 0x00;
3362 
3363 			flags = hci_inquiry_cache_update(hdev, &data, false);
3364 
3365 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3366 					  info->dev_class, info->rssi,
3367 					  flags, NULL, 0, NULL, 0);
3368 		}
3369 	} else {
3370 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3371 
3372 		for (; num_rsp; num_rsp--, info++) {
3373 			u32 flags;
3374 
3375 			bacpy(&data.bdaddr, &info->bdaddr);
3376 			data.pscan_rep_mode	= info->pscan_rep_mode;
3377 			data.pscan_period_mode	= info->pscan_period_mode;
3378 			data.pscan_mode		= 0x00;
3379 			memcpy(data.dev_class, info->dev_class, 3);
3380 			data.clock_offset	= info->clock_offset;
3381 			data.rssi		= info->rssi;
3382 			data.ssp_mode		= 0x00;
3383 
3384 			flags = hci_inquiry_cache_update(hdev, &data, false);
3385 
3386 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3387 					  info->dev_class, info->rssi,
3388 					  flags, NULL, 0, NULL, 0);
3389 		}
3390 	}
3391 
3392 	hci_dev_unlock(hdev);
3393 }
3394 
/* Handle the HCI Read Remote Extended Features Complete event: cache the
 * requested feature page, derive SSP/SC state from host features page 1
 * and continue connection setup (remote name request or connect
 * confirmation) while the connection is still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the reported feature page on the connection */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Connection setup continuation only applies while configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no further authentication is pending, declare the link up */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3460 
/* Handle the HCI Synchronous Connection Complete event for SCO/eSCO
 * links. On certain rejection errors an outgoing eSCO attempt is retried
 * with a fallback packet-type mask before the connection is given up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have been downgraded by the remote
		 * to SCO; retarget the pending eSCO connection object.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing attempts, retry setup with a reduced
		 * packet-type mask; if the retry was issued, leave the
		 * connection in place and wait for the next event.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3518 
/* Return the length of the significant part of an EIR data buffer:
 * the bytes up to (but not including) the first zero length field, or
 * the full buffer length when no terminating zero field is found.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset;

	/* Each EIR field is a length byte followed by that many bytes
	 * of payload; a zero length byte terminates the data early.
	 */
	for (offset = 0; offset < eir_len; offset += eir[offset] + 1) {
		if (eir[offset] == 0)
			return offset;
	}

	return eir_len;
}
3535 
hci_extended_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)3536 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3537 					    struct sk_buff *skb)
3538 {
3539 	struct inquiry_data data;
3540 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3541 	int num_rsp = *((__u8 *) skb->data);
3542 	size_t eir_len;
3543 
3544 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3545 
3546 	if (!num_rsp)
3547 		return;
3548 
3549 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3550 		return;
3551 
3552 	hci_dev_lock(hdev);
3553 
3554 	for (; num_rsp; num_rsp--, info++) {
3555 		u32 flags;
3556 		bool name_known;
3557 
3558 		bacpy(&data.bdaddr, &info->bdaddr);
3559 		data.pscan_rep_mode	= info->pscan_rep_mode;
3560 		data.pscan_period_mode	= info->pscan_period_mode;
3561 		data.pscan_mode		= 0x00;
3562 		memcpy(data.dev_class, info->dev_class, 3);
3563 		data.clock_offset	= info->clock_offset;
3564 		data.rssi		= info->rssi;
3565 		data.ssp_mode		= 0x01;
3566 
3567 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3568 			name_known = eir_has_data_type(info->data,
3569 						       sizeof(info->data),
3570 						       EIR_NAME_COMPLETE);
3571 		else
3572 			name_known = true;
3573 
3574 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3575 
3576 		eir_len = eir_get_length(info->data, sizeof(info->data));
3577 
3578 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3579 				  info->dev_class, info->rssi,
3580 				  flags, info->data, eir_len, NULL, 0);
3581 	}
3582 
3583 	hci_dev_unlock(hdev);
3584 }
3585 
/* Handle the HCI Encryption Key Refresh Complete event. Only LE links
 * are processed here; for BR/EDR the auth_complete event covers the
 * same transitions.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the pending security level becomes effective */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link tears it down */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timer; refcount is unchanged */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3635 
/* Compute the local authentication requirement to use in an IO
 * Capability Reply, combining the remote requirement with our own.
 * Bit 0 of an authentication requirement value is the MITM flag.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
3653 
/* Handle the HCI IO Capability Request event: decide whether SSP
 * pairing is allowed and reply with our IO capability, authentication
 * requirement and OOB data availability, or send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection referenced for the duration of pairing;
	 * NOTE(review): the matching drop happens in the pairing
	 * completion paths elsewhere in this file.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data when we have it and either started
		 * the connection or the remote announced OOB data too.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3727 
hci_io_capa_reply_evt(struct hci_dev * hdev,struct sk_buff * skb)3728 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3729 {
3730 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3731 	struct hci_conn *conn;
3732 
3733 	BT_DBG("%s", hdev->name);
3734 
3735 	hci_dev_lock(hdev);
3736 
3737 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3738 	if (!conn)
3739 		goto unlock;
3740 
3741 	conn->remote_cap = ev->capability;
3742 	conn->remote_auth = ev->authentication;
3743 	if (ev->oob_data)
3744 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3745 
3746 unlock:
3747 	hci_dev_unlock(hdev);
3748 }
3749 
/* Handle the HCI User Confirmation Request event: depending on the MITM
 * requirements of both sides, either auto-accept (immediately or after
 * a configured delay), reject, or hand the decision to user space.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the reply to a worker */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3824 
hci_user_passkey_request_evt(struct hci_dev * hdev,struct sk_buff * skb)3825 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3826 					 struct sk_buff *skb)
3827 {
3828 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3829 
3830 	BT_DBG("%s", hdev->name);
3831 
3832 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3833 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3834 }
3835 
hci_user_passkey_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)3836 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3837 					struct sk_buff *skb)
3838 {
3839 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3840 	struct hci_conn *conn;
3841 
3842 	BT_DBG("%s", hdev->name);
3843 
3844 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3845 	if (!conn)
3846 		return;
3847 
3848 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3849 	conn->passkey_entered = 0;
3850 
3851 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3852 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3853 					 conn->dst_type, conn->passkey_notify,
3854 					 conn->passkey_entered);
3855 }
3856 
hci_keypress_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)3857 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3858 {
3859 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3860 	struct hci_conn *conn;
3861 
3862 	BT_DBG("%s", hdev->name);
3863 
3864 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3865 	if (!conn)
3866 		return;
3867 
3868 	switch (ev->type) {
3869 	case HCI_KEYPRESS_STARTED:
3870 		conn->passkey_entered = 0;
3871 		return;
3872 
3873 	case HCI_KEYPRESS_ENTERED:
3874 		conn->passkey_entered++;
3875 		break;
3876 
3877 	case HCI_KEYPRESS_ERASED:
3878 		conn->passkey_entered--;
3879 		break;
3880 
3881 	case HCI_KEYPRESS_CLEARED:
3882 		conn->passkey_entered = 0;
3883 		break;
3884 
3885 	case HCI_KEYPRESS_COMPLETED:
3886 		return;
3887 	}
3888 
3889 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3890 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3891 					 conn->dst_type, conn->passkey_notify,
3892 					 conn->passkey_entered);
3893 }
3894 
hci_simple_pair_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)3895 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3896 					 struct sk_buff *skb)
3897 {
3898 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3899 	struct hci_conn *conn;
3900 
3901 	BT_DBG("%s", hdev->name);
3902 
3903 	hci_dev_lock(hdev);
3904 
3905 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3906 	if (!conn)
3907 		goto unlock;
3908 
3909 	/* Reset the authentication requirement to unknown */
3910 	conn->remote_auth = 0xff;
3911 
3912 	/* To avoid duplicate auth_failed events to user space we check
3913 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3914 	 * initiated the authentication. A traditional auth_complete
3915 	 * event gets always produced as initiator and is also mapped to
3916 	 * the mgmt_auth_failed event */
3917 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3918 		mgmt_auth_failed(conn, ev->status);
3919 
3920 	hci_conn_drop(conn);
3921 
3922 unlock:
3923 	hci_dev_unlock(hdev);
3924 }
3925 
hci_remote_host_features_evt(struct hci_dev * hdev,struct sk_buff * skb)3926 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3927 					 struct sk_buff *skb)
3928 {
3929 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3930 	struct inquiry_entry *ie;
3931 	struct hci_conn *conn;
3932 
3933 	BT_DBG("%s", hdev->name);
3934 
3935 	hci_dev_lock(hdev);
3936 
3937 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3938 	if (conn)
3939 		memcpy(conn->features[1], ev->features, 8);
3940 
3941 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3942 	if (ie)
3943 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3944 
3945 	hci_dev_unlock(hdev);
3946 }
3947 
/* Handle the HCI Remote OOB Data Request event: reply with the stored
 * out-of-band pairing data for the remote device (extended form when
 * Secure Connections is enabled), or a negative reply when none is
 * stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only tracked when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: send both the P-192 and
			 * P-256 hash/randomizer pairs.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3998 
hci_phy_link_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)3999 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4000 				      struct sk_buff *skb)
4001 {
4002 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4003 	struct hci_conn *hcon, *bredr_hcon;
4004 
4005 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4006 	       ev->status);
4007 
4008 	hci_dev_lock(hdev);
4009 
4010 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4011 	if (!hcon) {
4012 		hci_dev_unlock(hdev);
4013 		return;
4014 	}
4015 
4016 	if (ev->status) {
4017 		hci_conn_del(hcon);
4018 		hci_dev_unlock(hdev);
4019 		return;
4020 	}
4021 
4022 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4023 
4024 	hcon->state = BT_CONNECTED;
4025 	bacpy(&hcon->dst, &bredr_hcon->dst);
4026 
4027 	hci_conn_hold(hcon);
4028 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4029 	hci_conn_drop(hcon);
4030 
4031 	hci_conn_add_sysfs(hcon);
4032 
4033 	amp_physical_cfm(bredr_hcon, hcon);
4034 
4035 	hci_dev_unlock(hdev);
4036 }
4037 
/* Handle the AMP Logical Link Complete event: create an hci_chan for
 * the new logical link and confirm it to the waiting L2CAP channel.
 *
 * NOTE(review): unlike the sibling handlers this one accesses the
 * connection hash without taking hci_dev_lock — confirm whether that is
 * intentional or a missing lock.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP connection now runs over the AMP controller,
		 * so its MTU follows the AMP block MTU.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4075 
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4076 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4077 					     struct sk_buff *skb)
4078 {
4079 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4080 	struct hci_chan *hchan;
4081 
4082 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4083 	       le16_to_cpu(ev->handle), ev->status);
4084 
4085 	if (ev->status)
4086 		return;
4087 
4088 	hci_dev_lock(hdev);
4089 
4090 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4091 	if (!hchan)
4092 		goto unlock;
4093 
4094 	amp_destroy_logical_link(hchan, ev->reason);
4095 
4096 unlock:
4097 	hci_dev_unlock(hdev);
4098 }
4099 
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4100 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4101 					     struct sk_buff *skb)
4102 {
4103 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4104 	struct hci_conn *hcon;
4105 
4106 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4107 
4108 	if (ev->status)
4109 		return;
4110 
4111 	hci_dev_lock(hdev);
4112 
4113 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4114 	if (hcon) {
4115 		hcon->state = BT_CLOSED;
4116 		hci_conn_del(hcon);
4117 	}
4118 
4119 	hci_dev_unlock(hdev);
4120 }
4121 
/* Handle the HCI LE Connection Complete event.
 *
 * Looks up the hci_conn that is in BT_CONNECT state (or creates one for
 * white-list-initiated or incoming connections), records the initiator
 * and responder addresses, resolves a resolvable random address back to
 * its identity address via the IRK store, and on success marks the
 * connection established and notifies mgmt and the upper protocols.
 * Called with the skb positioned at the LE subevent parameters.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt is now resolved (successfully
		 * or not), so the pending connect timeout work no
		 * longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	/* Note: the IRK resolution above is done even for failed
	 * connections, so that hci_le_conn_failed() reports the
	 * identity address.
	 */
	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Only notify mgmt once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* If this connection was triggered by an auto-connect entry,
	 * take it off the pending list and release the extra reference
	 * that check_pending_le_conn() stored in params->conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4257 
hci_le_conn_update_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4258 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4259 					    struct sk_buff *skb)
4260 {
4261 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4262 	struct hci_conn *conn;
4263 
4264 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4265 
4266 	if (ev->status)
4267 		return;
4268 
4269 	hci_dev_lock(hdev);
4270 
4271 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4272 	if (conn) {
4273 		conn->le_conn_interval = le16_to_cpu(ev->interval);
4274 		conn->le_conn_latency = le16_to_cpu(ev->latency);
4275 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4276 	}
4277 
4278 	hci_dev_unlock(hdev);
4279 }
4280 
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an
 * outgoing LE connection attempt.
 *
 * Only connectable advertising (ADV_IND / ADV_DIRECT_IND) from a
 * non-blocked device that has a matching pend_le_conns entry with a
 * suitable auto-connect policy results in a call to hci_connect_le().
 * On success, a reference to the new hci_conn is stored in
 * params->conn so the attempt can be aborted if the parameters are
 * later removed.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4356 
/* Process one LE advertising report.
 *
 * Resolves an RPA to the identity address when an IRK is known, kicks
 * off a pending auto-connection if one is registered for the device,
 * and then turns the report into mgmt Device Found events. During
 * active scanning, ADV_IND/ADV_SCAN_IND reports are cached so they can
 * be merged with the matching SCAN_RSP before being sent out.
 * Caller holds hdev->lock (via hci_le_adv_report_evt).
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4480 
hci_le_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)4481 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4482 {
4483 	u8 num_reports = skb->data[0];
4484 	void *ptr = &skb->data[1];
4485 
4486 	hci_dev_lock(hdev);
4487 
4488 	while (num_reports--) {
4489 		struct hci_ev_le_advertising_info *ev = ptr;
4490 		s8 rssi;
4491 
4492 		rssi = ev->data[ev->length];
4493 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4494 				   ev->bdaddr_type, rssi, ev->data, ev->length);
4495 
4496 		ptr += sizeof(*ev) + ev->length + 1;
4497 	}
4498 
4499 	hci_dev_unlock(hdev);
4500 }
4501 
/* Handle the HCI LE Long Term Key Request event.
 *
 * The controller asks for the LTK to encrypt the given connection.
 * Look one up in the key store and answer with LE_LTK_REPLY; if no
 * connection or key is found, send LE_LTK_NEG_REPLY instead. STKs
 * (short term keys from pairing) are single-use and are deleted after
 * being handed to the controller.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Match on EDIV/Rand and the local role */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian, so no conversion needed */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4554 
/* Reject an LE connection parameter request on the given handle with
 * the supplied HCI error reason.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
4566 
hci_le_remote_conn_param_req_evt(struct hci_dev * hdev,struct sk_buff * skb)4567 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4568 					     struct sk_buff *skb)
4569 {
4570 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4571 	struct hci_cp_le_conn_param_req_reply cp;
4572 	struct hci_conn *hcon;
4573 	u16 handle, min, max, latency, timeout;
4574 
4575 	handle = le16_to_cpu(ev->handle);
4576 	min = le16_to_cpu(ev->interval_min);
4577 	max = le16_to_cpu(ev->interval_max);
4578 	latency = le16_to_cpu(ev->latency);
4579 	timeout = le16_to_cpu(ev->timeout);
4580 
4581 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
4582 	if (!hcon || hcon->state != BT_CONNECTED)
4583 		return send_conn_param_neg_reply(hdev, handle,
4584 						 HCI_ERROR_UNKNOWN_CONN_ID);
4585 
4586 	if (hci_check_conn_params(min, max, latency, timeout))
4587 		return send_conn_param_neg_reply(hdev, handle,
4588 						 HCI_ERROR_INVALID_LL_PARAMS);
4589 
4590 	if (hcon->role == HCI_ROLE_MASTER) {
4591 		struct hci_conn_params *params;
4592 		u8 store_hint;
4593 
4594 		hci_dev_lock(hdev);
4595 
4596 		params = hci_conn_params_lookup(hdev, &hcon->dst,
4597 						hcon->dst_type);
4598 		if (params) {
4599 			params->conn_min_interval = min;
4600 			params->conn_max_interval = max;
4601 			params->conn_latency = latency;
4602 			params->supervision_timeout = timeout;
4603 			store_hint = 0x01;
4604 		} else{
4605 			store_hint = 0x00;
4606 		}
4607 
4608 		hci_dev_unlock(hdev);
4609 
4610 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4611 				    store_hint, min, max, latency, timeout);
4612 	}
4613 
4614 	cp.handle = ev->handle;
4615 	cp.interval_min = ev->interval_min;
4616 	cp.interval_max = ev->interval_max;
4617 	cp.latency = ev->latency;
4618 	cp.timeout = ev->timeout;
4619 	cp.min_ce_len = 0;
4620 	cp.max_ce_len = 0;
4621 
4622 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4623 }
4624 
hci_le_meta_evt(struct hci_dev * hdev,struct sk_buff * skb)4625 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4626 {
4627 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4628 
4629 	skb_pull(skb, sizeof(*le_ev));
4630 
4631 	switch (le_ev->subevent) {
4632 	case HCI_EV_LE_CONN_COMPLETE:
4633 		hci_le_conn_complete_evt(hdev, skb);
4634 		break;
4635 
4636 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4637 		hci_le_conn_update_complete_evt(hdev, skb);
4638 		break;
4639 
4640 	case HCI_EV_LE_ADVERTISING_REPORT:
4641 		hci_le_adv_report_evt(hdev, skb);
4642 		break;
4643 
4644 	case HCI_EV_LE_LTK_REQ:
4645 		hci_le_ltk_request_evt(hdev, skb);
4646 		break;
4647 
4648 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4649 		hci_le_remote_conn_param_req_evt(hdev, skb);
4650 		break;
4651 
4652 	default:
4653 		break;
4654 	}
4655 }
4656 
hci_chan_selected_evt(struct hci_dev * hdev,struct sk_buff * skb)4657 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4658 {
4659 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4660 	struct hci_conn *hcon;
4661 
4662 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4663 
4664 	skb_pull(skb, sizeof(*ev));
4665 
4666 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4667 	if (!hcon)
4668 		return;
4669 
4670 	amp_read_loc_assoc_final_data(hdev, hcon);
4671 }
4672 
/* Main HCI event demultiplexer.
 *
 * Entry point for every HCI event packet coming from the controller.
 * Keeps a clone of the event for a pending synchronous request,
 * completes a sent command that was waiting for this event, then
 * dispatches to the per-event handler. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting specifically for this
	 * event, complete the request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the event-specific handler; handlers see the skb
	 * positioned past the event header.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
4876