/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

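/* Handler naming convention: hci_cc_* functions process Command Complete
 * events for a specific opcode, while hci_cs_* functions (further below)
 * process Command Status events for commands that complete asynchronously.
 */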
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

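/* The parameters echoed back via hci_sent_cmd_data() start with the 2-byte
 * connection handle, so the policy value is read from offset 2 of the sent
 * payload below.
 */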
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

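/* HCI_Reset returns the controller to its default state, so mirror that on
 * the host side: clear volatile flags and cached advertising/scan data, and
 * empty the white and resolving lists.
 */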
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

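/* The max_keys/num_keys totals are only cached when the command was sent
 * with Read_All_Flag set (0x01), since a single-address lookup does not
 * describe the whole key store.
 */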
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

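/* As with Write_Link_Policy_Settings, the sent parameters begin with the
 * 2-byte connection handle, so the timeout is read from offset 2 of the
 * sent payload.
 */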
static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

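/* The packet counts cached here seed host-side flow control: acl_cnt and
 * sco_cnt track how many packets the controller can still accept and are
 * replenished by Number Of Completed Packets events.
 */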
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

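/* For Read_Clock, cp->which selects the source: 0x00 is the local Bluetooth
 * clock (cached on hdev), anything else refers to the piconet clock of the
 * connection identified by the returned handle.
 */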
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

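/* With extended advertising each advertising set has its own random
 * address; instance 0 covers the legacy and directed advertising cases,
 * so its address is kept on hdev rather than on an adv_info instance.
 */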
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

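/* The extended command can carry parameters for multiple PHYs; only the
 * scan type of the first PHY entry is cached on hdev here.
 */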
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

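/* During active scanning an advertising report is not forwarded to mgmt
 * right away; it is cached in the discovery state until the matching scan
 * response arrives (or scanning stops), so both can be merged into one
 * device-found event. The helpers below manage that single-entry cache.
 */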
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

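/* Common tail for the legacy and extended scan-enable handlers: on disable,
 * flush any advertising report still pending in the cache above before
 * clearing HCI_LE_SCAN.
 */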
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}

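/* A Command Status of 0x0c (Command Disallowed) for Create_Connection
 * typically means the controller is busy; for the first two attempts the
 * connection is parked in BT_CONNECT2 so it can be retried instead of
 * being torn down right away.
 */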
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

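/* Decide whether an outgoing connection still in BT_CONFIG needs HCI
 * authentication before it can be reported as connected; an odd auth_type
 * value indicates that MITM protection was requested.
 */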
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

1956 static bool hci_resolve_next_name(struct hci_dev *hdev)
1957 {
1958 struct discovery_state *discov = &hdev->discovery;
1959 struct inquiry_entry *e;
1960
1961 if (list_empty(&discov->resolve))
1962 return false;
1963
1964 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1965 if (!e)
1966 return false;
1967
1968 if (hci_resolve_name(hdev, e) == 0) {
1969 e->name_state = NAME_PENDING;
1970 return true;
1971 }
1972
1973 return false;
1974 }
1975
1976 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1977 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1978 {
1979 struct discovery_state *discov = &hdev->discovery;
1980 struct inquiry_entry *e;
1981
1982 /* Update the mgmt connected state if necessary. Be careful with
1983 * conn objects that exist but are not (yet) connected, however.
1984 * Only those in BT_CONFIG or BT_CONNECTED states can be
1985 * considered connected.
1986 */
1987 if (conn &&
1988 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1989 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1990 mgmt_device_connected(hdev, conn, 0, name, name_len);
1991
1992 if (discov->state == DISCOVERY_STOPPED)
1993 return;
1994
1995 if (discov->state == DISCOVERY_STOPPING)
1996 goto discov_complete;
1997
1998 if (discov->state != DISCOVERY_RESOLVING)
1999 return;
2000
2001 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2002 /* If the device was not found in the list of devices whose names
2003 * are pending resolution, there is no need to continue resolving the
2004 * next name, as that will be done upon receiving another Remote Name
2005 * Request Complete event */
2006 if (!e)
2007 return;
2008
2009 list_del(&e->list);
2010 if (name) {
2011 e->name_state = NAME_KNOWN;
2012 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2013 e->data.rssi, name, name_len);
2014 } else {
2015 e->name_state = NAME_NOT_KNOWN;
2016 }
2017
2018 if (hci_resolve_next_name(hdev))
2019 return;
2020
2021 discov_complete:
2022 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2023 }
2024
2025 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2026 {
2027 struct hci_cp_remote_name_req *cp;
2028 struct hci_conn *conn;
2029
2030 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2031
2032 /* If successful, wait for the name req complete event before
2033 * checking whether authentication is needed */
2034 if (!status)
2035 return;
2036
2037 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2038 if (!cp)
2039 return;
2040
2041 hci_dev_lock(hdev);
2042
2043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2044
2045 if (hci_dev_test_flag(hdev, HCI_MGMT))
2046 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2047
2048 if (!conn)
2049 goto unlock;
2050
2051 if (!hci_outgoing_auth_needed(hdev, conn))
2052 goto unlock;
2053
2054 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2055 struct hci_cp_auth_requested auth_cp;
2056
2057 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2058
2059 auth_cp.handle = __cpu_to_le16(conn->handle);
2060 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2061 sizeof(auth_cp), &auth_cp);
2062 }
2063
2064 unlock:
2065 hci_dev_unlock(hdev);
2066 }
2067
2068 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2069 {
2070 struct hci_cp_read_remote_features *cp;
2071 struct hci_conn *conn;
2072
2073 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2074
2075 if (!status)
2076 return;
2077
2078 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2079 if (!cp)
2080 return;
2081
2082 hci_dev_lock(hdev);
2083
2084 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2085 if (conn) {
2086 if (conn->state == BT_CONFIG) {
2087 hci_connect_cfm(conn, status);
2088 hci_conn_drop(conn);
2089 }
2090 }
2091
2092 hci_dev_unlock(hdev);
2093 }
2094
2095 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2096 {
2097 struct hci_cp_read_remote_ext_features *cp;
2098 struct hci_conn *conn;
2099
2100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2101
2102 if (!status)
2103 return;
2104
2105 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2106 if (!cp)
2107 return;
2108
2109 hci_dev_lock(hdev);
2110
2111 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2112 if (conn) {
2113 if (conn->state == BT_CONFIG) {
2114 hci_connect_cfm(conn, status);
2115 hci_conn_drop(conn);
2116 }
2117 }
2118
2119 hci_dev_unlock(hdev);
2120 }
2121
2122 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2123 {
2124 struct hci_cp_setup_sync_conn *cp;
2125 struct hci_conn *acl, *sco;
2126 __u16 handle;
2127
2128 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2129
2130 if (!status)
2131 return;
2132
2133 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2134 if (!cp)
2135 return;
2136
2137 handle = __le16_to_cpu(cp->handle);
2138
2139 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2140
2141 hci_dev_lock(hdev);
2142
2143 acl = hci_conn_hash_lookup_handle(hdev, handle);
2144 if (acl) {
2145 sco = acl->link;
2146 if (sco) {
2147 sco->state = BT_CLOSED;
2148
2149 hci_connect_cfm(sco, status);
2150 hci_conn_del(sco);
2151 }
2152 }
2153
2154 hci_dev_unlock(hdev);
2155 }
2156
2157 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2158 {
2159 struct hci_cp_sniff_mode *cp;
2160 struct hci_conn *conn;
2161
2162 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2163
2164 if (!status)
2165 return;
2166
2167 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2168 if (!cp)
2169 return;
2170
2171 hci_dev_lock(hdev);
2172
2173 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2174 if (conn) {
2175 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2176
2177 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2178 hci_sco_setup(conn, status);
2179 }
2180
2181 hci_dev_unlock(hdev);
2182 }
2183
2184 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2185 {
2186 struct hci_cp_exit_sniff_mode *cp;
2187 struct hci_conn *conn;
2188
2189 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2190
2191 if (!status)
2192 return;
2193
2194 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2195 if (!cp)
2196 return;
2197
2198 hci_dev_lock(hdev);
2199
2200 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2201 if (conn) {
2202 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2203
2204 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2205 hci_sco_setup(conn, status);
2206 }
2207
2208 hci_dev_unlock(hdev);
2209 }
2210
2211 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2212 {
2213 struct hci_cp_disconnect *cp;
2214 struct hci_conn *conn;
2215
2216 if (!status)
2217 return;
2218
2219 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2220 if (!cp)
2221 return;
2222
2223 hci_dev_lock(hdev);
2224
2225 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2226 if (conn)
2227 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2228 conn->dst_type, status);
2229
2230 hci_dev_unlock(hdev);
2231 }
2232
2233 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2234 u8 peer_addr_type, u8 own_address_type,
2235 u8 filter_policy)
2236 {
2237 struct hci_conn *conn;
2238
2239 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2240 peer_addr_type);
2241 if (!conn)
2242 return;
2243
2244 /* Store the initiator and responder address information which
2245 * is needed for SMP. These values will not change during the
2246 * lifetime of the connection.
2247 */
2248 conn->init_addr_type = own_address_type;
2249 if (own_address_type == ADDR_LE_DEV_RANDOM)
2250 bacpy(&conn->init_addr, &hdev->random_addr);
2251 else
2252 bacpy(&conn->init_addr, &hdev->bdaddr);
2253
2254 conn->resp_addr_type = peer_addr_type;
2255 bacpy(&conn->resp_addr, peer_addr);
2256
2257 /* We don't want the connection attempt to stick around
2258 * indefinitely since LE doesn't have a page timeout concept
2259 * like BR/EDR. Set a timer for any connection that doesn't use
2260 * the white list for connecting.
2261 */
2262 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2263 queue_delayed_work(conn->hdev->workqueue,
2264 &conn->le_conn_timeout,
2265 conn->conn_timeout);
2266 }
2267
2268 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2269 {
2270 struct hci_cp_le_create_conn *cp;
2271
2272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2273
2274 /* All connection failure handling is taken care of by the
2275 * hci_le_conn_failed function which is triggered by the HCI
2276 * request completion callbacks used for connecting.
2277 */
2278 if (status)
2279 return;
2280
2281 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2282 if (!cp)
2283 return;
2284
2285 hci_dev_lock(hdev);
2286
2287 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2288 cp->own_address_type, cp->filter_policy);
2289
2290 hci_dev_unlock(hdev);
2291 }
2292
2293 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2294 {
2295 struct hci_cp_le_ext_create_conn *cp;
2296
2297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2298
2299 /* All connection failure handling is taken care of by the
2300 * hci_le_conn_failed function which is triggered by the HCI
2301 * request completion callbacks used for connecting.
2302 */
2303 if (status)
2304 return;
2305
2306 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2307 if (!cp)
2308 return;
2309
2310 hci_dev_lock(hdev);
2311
2312 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2313 cp->own_addr_type, cp->filter_policy);
2314
2315 hci_dev_unlock(hdev);
2316 }
2317
2318 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2319 {
2320 struct hci_cp_le_read_remote_features *cp;
2321 struct hci_conn *conn;
2322
2323 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2324
2325 if (!status)
2326 return;
2327
2328 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2329 if (!cp)
2330 return;
2331
2332 hci_dev_lock(hdev);
2333
2334 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2335 if (conn) {
2336 if (conn->state == BT_CONFIG) {
2337 hci_connect_cfm(conn, status);
2338 hci_conn_drop(conn);
2339 }
2340 }
2341
2342 hci_dev_unlock(hdev);
2343 }
2344
2345 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2346 {
2347 struct hci_cp_le_start_enc *cp;
2348 struct hci_conn *conn;
2349
2350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2351
2352 if (!status)
2353 return;
2354
2355 hci_dev_lock(hdev);
2356
2357 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2358 if (!cp)
2359 goto unlock;
2360
2361 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2362 if (!conn)
2363 goto unlock;
2364
2365 if (conn->state != BT_CONNECTED)
2366 goto unlock;
2367
2368 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2369 hci_conn_drop(conn);
2370
2371 unlock:
2372 hci_dev_unlock(hdev);
2373 }
2374
2375 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2376 {
2377 struct hci_cp_switch_role *cp;
2378 struct hci_conn *conn;
2379
2380 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2381
2382 if (!status)
2383 return;
2384
2385 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2386 if (!cp)
2387 return;
2388
2389 hci_dev_lock(hdev);
2390
2391 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2392 if (conn)
2393 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2394
2395 hci_dev_unlock(hdev);
2396 }
2397
2398 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2399 {
2400 __u8 status = *((__u8 *) skb->data);
2401 struct discovery_state *discov = &hdev->discovery;
2402 struct inquiry_entry *e;
2403
2404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2405
2406 hci_conn_check_pending(hdev);
2407
2408 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2409 return;
2410
2411 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2412 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2413
2414 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2415 return;
2416
2417 hci_dev_lock(hdev);
2418
2419 if (discov->state != DISCOVERY_FINDING)
2420 goto unlock;
2421
2422 if (list_empty(&discov->resolve)) {
2423 /* When BR/EDR inquiry is active and no LE scanning is in
2424 * progress, then change discovery state to indicate completion.
2425 *
2426 * When running LE scanning and BR/EDR inquiry simultaneously
2427 * and the LE scan already finished, then change the discovery
2428 * state to indicate completion.
2429 */
2430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2431 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2432 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2433 goto unlock;
2434 }
2435
2436 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2437 if (e && hci_resolve_name(hdev, e) == 0) {
2438 e->name_state = NAME_PENDING;
2439 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2440 } else {
2441 /* When BR/EDR inquiry is active and no LE scanning is in
2442 * progress, then change discovery state to indicate completion.
2443 *
2444 * When running LE scanning and BR/EDR inquiry simultaneously
2445 * and the LE scan already finished, then change the discovery
2446 * state to indicate completion.
2447 */
2448 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2449 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2450 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2451 }
2452
2453 unlock:
2454 hci_dev_unlock(hdev);
2455 }
2456
2457 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2458 {
2459 struct inquiry_data data;
2460 struct inquiry_info *info = (void *) (skb->data + 1);
2461 int num_rsp = *((__u8 *) skb->data);
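/* Event layout: a one-byte response count followed by num_rsp
 * struct inquiry_info records, hence the skb->data + 1 above.
 */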
2462
2463 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2464
2465 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2466 return;
2467
2468 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2469 return;
2470
2471 hci_dev_lock(hdev);
2472
2473 for (; num_rsp; num_rsp--, info++) {
2474 u32 flags;
2475
2476 bacpy(&data.bdaddr, &info->bdaddr);
2477 data.pscan_rep_mode = info->pscan_rep_mode;
2478 data.pscan_period_mode = info->pscan_period_mode;
2479 data.pscan_mode = info->pscan_mode;
2480 memcpy(data.dev_class, info->dev_class, 3);
2481 data.clock_offset = info->clock_offset;
2482 data.rssi = HCI_RSSI_INVALID;
2483 data.ssp_mode = 0x00;
2484
2485 flags = hci_inquiry_cache_update(hdev, &data, false);
2486
2487 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2488 info->dev_class, HCI_RSSI_INVALID,
2489 flags, NULL, 0, NULL, 0);
2490 }
2491
2492 hci_dev_unlock(hdev);
2493 }
2494
2495 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2496 {
2497 struct hci_ev_conn_complete *ev = (void *) skb->data;
2498 struct hci_conn *conn;
2499
2500 BT_DBG("%s", hdev->name);
2501
2502 hci_dev_lock(hdev);
2503
2504 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2505 if (!conn) {
2506 if (ev->link_type != SCO_LINK)
2507 goto unlock;
2508
2509 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2510 if (!conn)
2511 goto unlock;
2512
2513 conn->type = SCO_LINK;
2514 }
2515
2516 if (!ev->status) {
2517 conn->handle = __le16_to_cpu(ev->handle);
2518
2519 if (conn->type == ACL_LINK) {
2520 conn->state = BT_CONFIG;
2521 hci_conn_hold(conn);
2522
2523 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2524 !hci_find_link_key(hdev, &ev->bdaddr))
2525 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2526 else
2527 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2528 } else
2529 conn->state = BT_CONNECTED;
2530
2531 hci_debugfs_create_conn(conn);
2532 hci_conn_add_sysfs(conn);
2533
2534 if (test_bit(HCI_AUTH, &hdev->flags))
2535 set_bit(HCI_CONN_AUTH, &conn->flags);
2536
2537 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2538 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2539
2540 /* "Link key request" completed ahead of "connect request" completes */
2541 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
2542 ev->link_type == ACL_LINK) {
2543 struct link_key *key;
2544 struct hci_cp_read_enc_key_size cp;
2545
2546 key = hci_find_link_key(hdev, &ev->bdaddr);
2547 if (key) {
2548 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2549
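/* Octet 20 bit 4 of the supported-commands bitmask indicates
 * support for HCI_Read_Encryption_Key_Size.
 */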
2550 if (!(hdev->commands[20] & 0x10)) {
2551 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2552 } else {
2553 cp.handle = cpu_to_le16(conn->handle);
2554 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
2555 sizeof(cp), &cp)) {
2556 bt_dev_err(hdev, "sending read key size failed");
2557 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2558 }
2559 }
2560
2561 hci_encrypt_cfm(conn, ev->status);
2562 }
2563 }
2564
2565 /* Get remote features */
2566 if (conn->type == ACL_LINK) {
2567 struct hci_cp_read_remote_features cp;
2568 cp.handle = ev->handle;
2569 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2570 sizeof(cp), &cp);
2571
2572 hci_req_update_scan(hdev);
2573 }
2574
2575 /* Set packet type for incoming connection */
2576 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2577 struct hci_cp_change_conn_ptype cp;
2578 cp.handle = ev->handle;
2579 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2580 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2581 &cp);
2582 }
2583 } else {
2584 conn->state = BT_CLOSED;
2585 if (conn->type == ACL_LINK)
2586 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2587 conn->dst_type, ev->status);
2588 }
2589
2590 if (conn->type == ACL_LINK)
2591 hci_sco_setup(conn, ev->status);
2592
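/* For a successful ACL connection, hci_connect_cfm() is deferred
 * until remote feature discovery and authentication finish.
 */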
2593 if (ev->status) {
2594 hci_connect_cfm(conn, ev->status);
2595 hci_conn_del(conn);
2596 } else if (ev->link_type != ACL_LINK)
2597 hci_connect_cfm(conn, ev->status);
2598
2599 unlock:
2600 hci_dev_unlock(hdev);
2601
2602 hci_conn_check_pending(hdev);
2603 }
2604
2605 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2606 {
2607 struct hci_cp_reject_conn_req cp;
2608
2609 bacpy(&cp.bdaddr, bdaddr);
2610 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2611 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2612 }
2613
2614 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2615 {
2616 struct hci_ev_conn_request *ev = (void *) skb->data;
2617 int mask = hdev->link_mode;
2618 struct inquiry_entry *ie;
2619 struct hci_conn *conn;
2620 __u8 flags = 0;
2621
2622 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2623 ev->link_type);
2624
2625 /* Reject incoming connection from device with same BD ADDR against
2626 * CVE-2020-26555
2627 */
2628 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2629 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2630 &ev->bdaddr);
2631 hci_reject_conn(hdev, &ev->bdaddr);
2632 return;
2633 }
2634
2635 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2636 &flags);
2637
2638 if (!(mask & HCI_LM_ACCEPT)) {
2639 hci_reject_conn(hdev, &ev->bdaddr);
2640 return;
2641 }
2642
2643 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2644 BDADDR_BREDR)) {
2645 hci_reject_conn(hdev, &ev->bdaddr);
2646 return;
2647 }
2648
2649 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2650 * connection. These features are only touched through mgmt so
2651 * only do the checks if HCI_MGMT is set.
2652 */
2653 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2654 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2655 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2656 BDADDR_BREDR)) {
2657 hci_reject_conn(hdev, &ev->bdaddr);
2658 return;
2659 }
2660
2661 /* Connection accepted */
2662
2663 hci_dev_lock(hdev);
2664
2665 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2666 if (ie)
2667 memcpy(ie->data.dev_class, ev->dev_class, 3);
2668
2669 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2670 &ev->bdaddr);
2671 if (!conn) {
2672 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2673 HCI_ROLE_SLAVE);
2674 if (!conn) {
2675 bt_dev_err(hdev, "no memory for new connection");
2676 hci_dev_unlock(hdev);
2677 return;
2678 }
2679 }
2680
2681 memcpy(conn->dev_class, ev->dev_class, 3);
2682
2683 hci_dev_unlock(hdev);
2684
2685 if (ev->link_type == ACL_LINK ||
2686 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2687 struct hci_cp_accept_conn_req cp;
2688 conn->state = BT_CONNECT;
2689
2690 bacpy(&cp.bdaddr, &ev->bdaddr);
2691
2692 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2693 cp.role = 0x00; /* Become master */
2694 else
2695 cp.role = 0x01; /* Remain slave */
2696
2697 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2698 } else if (!(flags & HCI_PROTO_DEFER)) {
2699 struct hci_cp_accept_sync_conn_req cp;
2700 conn->state = BT_CONNECT;
2701
2702 bacpy(&cp.bdaddr, &ev->bdaddr);
2703 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2704
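/* 0x1f40 is 8000 bytes/s (64 kbit/s) in each direction; 0xffff
 * for max_latency and 0xff for retrans_effort mean "don't care".
 */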
2705 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2706 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2707 cp.max_latency = cpu_to_le16(0xffff);
2708 cp.content_format = cpu_to_le16(hdev->voice_setting);
2709 cp.retrans_effort = 0xff;
2710
2711 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2712 &cp);
2713 } else {
2714 conn->state = BT_CONNECT2;
2715 hci_connect_cfm(conn, 0);
2716 }
2717 }
2718
2719 static u8 hci_to_mgmt_reason(u8 err)
2720 {
2721 switch (err) {
2722 case HCI_ERROR_CONNECTION_TIMEOUT:
2723 return MGMT_DEV_DISCONN_TIMEOUT;
2724 case HCI_ERROR_REMOTE_USER_TERM:
2725 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2726 case HCI_ERROR_REMOTE_POWER_OFF:
2727 return MGMT_DEV_DISCONN_REMOTE;
2728 case HCI_ERROR_LOCAL_HOST_TERM:
2729 return MGMT_DEV_DISCONN_LOCAL_HOST;
2730 default:
2731 return MGMT_DEV_DISCONN_UNKNOWN;
2732 }
2733 }
2734
2735 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2736 {
2737 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2738 u8 reason;
2739 struct hci_conn_params *params;
2740 struct hci_conn *conn;
2741 bool mgmt_connected;
2742 u8 type;
2743
2744 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2745
2746 hci_dev_lock(hdev);
2747
2748 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2749 if (!conn)
2750 goto unlock;
2751
2752 if (ev->status) {
2753 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2754 conn->dst_type, ev->status);
2755 goto unlock;
2756 }
2757
2758 conn->state = BT_CLOSED;
2759
2760 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2761
2762 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2763 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2764 else
2765 reason = hci_to_mgmt_reason(ev->reason);
2766
2767 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2768 reason, mgmt_connected);
2769
2770 if (conn->type == ACL_LINK) {
2771 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2772 hci_remove_link_key(hdev, &conn->dst);
2773
2774 hci_req_update_scan(hdev);
2775 }
2776
2777 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2778 if (params) {
2779 switch (params->auto_connect) {
2780 case HCI_AUTO_CONN_LINK_LOSS:
2781 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2782 break;
2783 /* Fall through */
2784
2785 case HCI_AUTO_CONN_DIRECT:
2786 case HCI_AUTO_CONN_ALWAYS:
2787 list_del_init(¶ms->action);
2788 list_add(¶ms->action, &hdev->pend_le_conns);
2789 hci_update_background_scan(hdev);
2790 break;
2791
2792 default:
2793 break;
2794 }
2795 }
2796
2797 type = conn->type;
2798
2799 hci_disconn_cfm(conn, ev->reason);
2800 hci_conn_del(conn);
2801
2802 /* Re-enable advertising if necessary, since it might
2803 * have been disabled by the connection. From the
2804 * HCI_LE_Set_Advertise_Enable command description in
2805 * the core specification (v4.0):
2806 * "The Controller shall continue advertising until the Host
2807 * issues an LE_Set_Advertise_Enable command with
2808 * Advertising_Enable set to 0x00 (Advertising is disabled)
2809 * or until a connection is created or until the Advertising
2810 * is timed out due to Directed Advertising."
2811 */
2812 if (type == LE_LINK)
2813 hci_req_reenable_advertising(hdev);
2814
2815 unlock:
2816 hci_dev_unlock(hdev);
2817 }
2818
2819 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2820 {
2821 struct hci_ev_auth_complete *ev = (void *) skb->data;
2822 struct hci_conn *conn;
2823
2824 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2825
2826 hci_dev_lock(hdev);
2827
2828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2829 if (!conn)
2830 goto unlock;
2831
2832 if (!ev->status) {
2833 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2834 set_bit(HCI_CONN_AUTH, &conn->flags);
2835 conn->sec_level = conn->pending_sec_level;
2836 } else {
2837 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2838 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2839
2840 mgmt_auth_failed(conn, ev->status);
2841 }
2842
2843 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2844
2845 if (conn->state == BT_CONFIG) {
2846 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2847 struct hci_cp_set_conn_encrypt cp;
2848 cp.handle = ev->handle;
2849 cp.encrypt = 0x01;
2850 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2851 &cp);
2852 } else {
2853 conn->state = BT_CONNECTED;
2854 hci_connect_cfm(conn, ev->status);
2855 hci_conn_drop(conn);
2856 }
2857 } else {
2858 hci_auth_cfm(conn, ev->status);
2859
2860 hci_conn_hold(conn);
2861 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2862 hci_conn_drop(conn);
2863 }
2864
2865 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2866 if (!ev->status) {
2867 struct hci_cp_set_conn_encrypt cp;
2868 cp.handle = ev->handle;
2869 cp.encrypt = 0x01;
2870 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2871 &cp);
2872 } else {
2873 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2874 hci_encrypt_cfm(conn, ev->status);
2875 }
2876 }
2877
2878 unlock:
2879 hci_dev_unlock(hdev);
2880 }
2881
2882 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2883 {
2884 struct hci_ev_remote_name *ev = (void *) skb->data;
2885 struct hci_conn *conn;
2886
2887 BT_DBG("%s", hdev->name);
2888
2889 hci_dev_lock(hdev);
2890
2891 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2892
2893 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2894 goto check_auth;
2895
2896 if (ev->status == 0)
2897 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2898 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2899 else
2900 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2901
2902 check_auth:
2903 if (!conn)
2904 goto unlock;
2905
2906 if (!hci_outgoing_auth_needed(hdev, conn))
2907 goto unlock;
2908
2909 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2910 struct hci_cp_auth_requested cp;
2911
2912 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2913
2914 cp.handle = __cpu_to_le16(conn->handle);
2915 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2916 }
2917
2918 unlock:
2919 hci_dev_unlock(hdev);
2920 }
2921
2922 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2923 u16 opcode, struct sk_buff *skb)
2924 {
2925 const struct hci_rp_read_enc_key_size *rp;
2926 struct hci_conn *conn;
2927 u16 handle;
2928
2929 BT_DBG("%s status 0x%02x", hdev->name, status);
2930
2931 if (!skb || skb->len < sizeof(*rp)) {
2932 bt_dev_err(hdev, "invalid read key size response");
2933 return;
2934 }
2935
2936 rp = (void *)skb->data;
2937 handle = le16_to_cpu(rp->handle);
2938
2939 hci_dev_lock(hdev);
2940
2941 conn = hci_conn_hash_lookup_handle(hdev, handle);
2942 if (!conn)
2943 goto unlock;
2944
2945 /* If we fail to read the encryption key size, assume the maximum
2946 * (which is the same thing we do when this HCI command isn't
2947 * supported).
2948 */
2949 if (rp->status) {
2950 bt_dev_err(hdev, "failed to read key size for handle %u",
2951 handle);
2952 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2953 } else {
2954 conn->enc_key_size = rp->key_size;
2955 }
2956
2957 hci_encrypt_cfm(conn, 0);
2958
2959 unlock:
2960 hci_dev_unlock(hdev);
2961 }
2962
2963 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2964 {
2965 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2966 struct hci_conn *conn;
2967
2968 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2969
2970 hci_dev_lock(hdev);
2971
2972 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2973 if (!conn)
2974 goto unlock;
2975
2976 if (!ev->status) {
2977 if (ev->encrypt) {
2978 /* Encryption implies authentication */
2979 set_bit(HCI_CONN_AUTH, &conn->flags);
2980 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2981 conn->sec_level = conn->pending_sec_level;
2982
2983 /* P-256 authentication key implies FIPS */
2984 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2985 set_bit(HCI_CONN_FIPS, &conn->flags);
2986
2987 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2988 conn->type == LE_LINK)
2989 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2990 } else {
2991 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2992 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2993 }
2994 }
2995
2996 /* We should disregard the current RPA and generate a new one
2997 * whenever the encryption procedure fails.
2998 */
2999 if (ev->status && conn->type == LE_LINK) {
3000 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3001 hci_adv_instances_set_rpa_expired(hdev, true);
3002 }
3003
3004 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3005
3006 /* Check link security requirements are met */
3007 if (!hci_conn_check_link_mode(conn))
3008 ev->status = HCI_ERROR_AUTH_FAILURE;
3009
3010 if (ev->status && conn->state == BT_CONNECTED) {
3011 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3012 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3013
3014 /* Notify upper layers so they can cleanup before
3015 * disconnecting.
3016 */
3017 hci_encrypt_cfm(conn, ev->status);
3018 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3019 hci_conn_drop(conn);
3020 goto unlock;
3021 }
3022
3023 /* Try reading the encryption key size for encrypted ACL links */
3024 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3025 struct hci_cp_read_enc_key_size cp;
3026 struct hci_request req;
3027
3028 /* Only send HCI_Read_Encryption_Key_Size if the
3029 * controller really supports it. If it doesn't, assume
3030 * the default size (16).
3031 */
3032 if (!(hdev->commands[20] & 0x10)) {
3033 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3034 goto notify;
3035 }
3036
3037 hci_req_init(&req, hdev);
3038
3039 cp.handle = cpu_to_le16(conn->handle);
3040 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3041
3042 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3043 bt_dev_err(hdev, "sending read key size failed");
3044 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3045 goto notify;
3046 }
3047
3048 goto unlock;
3049 }
3050
3051 /* Set the default Authenticated Payload Timeout once a link is
3052 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
3053 * the WRITE_AUTH_PAYLOAD_TIMEOUT command should be sent when the
3054 * link is active and encryption is enabled. The connection type can
3055 * be either LE or ACL, the controller must support LMP Ping, and
3056 * the link must use AES-CCM encryption.
3057 */
3058 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3059 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3060 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3061 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3062 struct hci_cp_write_auth_payload_to cp;
3063
3064 cp.handle = cpu_to_le16(conn->handle);
3065 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3066 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3067 sizeof(cp), &cp);
3068 }
3069
3070 notify:
3071 hci_encrypt_cfm(conn, ev->status);
3072
3073 unlock:
3074 hci_dev_unlock(hdev);
3075 }
3076
3077 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3078 struct sk_buff *skb)
3079 {
3080 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3081 struct hci_conn *conn;
3082
3083 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3084
3085 hci_dev_lock(hdev);
3086
3087 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3088 if (conn) {
3089 if (!ev->status)
3090 set_bit(HCI_CONN_SECURE, &conn->flags);
3091
3092 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3093
3094 hci_key_change_cfm(conn, ev->status);
3095 }
3096
3097 hci_dev_unlock(hdev);
3098 }
3099
3100 static void hci_remote_features_evt(struct hci_dev *hdev,
3101 struct sk_buff *skb)
3102 {
3103 struct hci_ev_remote_features *ev = (void *) skb->data;
3104 struct hci_conn *conn;
3105
3106 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3107
3108 hci_dev_lock(hdev);
3109
3110 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3111 if (!conn)
3112 goto unlock;
3113
3114 if (!ev->status)
3115 memcpy(conn->features[0], ev->features, 8);
3116
3117 if (conn->state != BT_CONFIG)
3118 goto unlock;
3119
3120 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3121 lmp_ext_feat_capable(conn)) {
3122 struct hci_cp_read_remote_ext_features cp;
3123 cp.handle = ev->handle;
3124 cp.page = 0x01;
3125 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3126 sizeof(cp), &cp);
3127 goto unlock;
3128 }
3129
3130 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3131 struct hci_cp_remote_name_req cp;
3132 memset(&cp, 0, sizeof(cp));
3133 bacpy(&cp.bdaddr, &conn->dst);
3134 cp.pscan_rep_mode = 0x02;
3135 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3136 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3137 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3138
3139 if (!hci_outgoing_auth_needed(hdev, conn)) {
3140 conn->state = BT_CONNECTED;
3141 hci_connect_cfm(conn, ev->status);
3142 hci_conn_drop(conn);
3143 }
3144
3145 unlock:
3146 hci_dev_unlock(hdev);
3147 }
3148
3149 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3150 u16 *opcode, u8 *status,
3151 hci_req_complete_t *req_complete,
3152 hci_req_complete_skb_t *req_complete_skb)
3153 {
3154 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3155
3156 *opcode = __le16_to_cpu(ev->opcode);
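/* The first byte of the return parameters, right after the fixed
 * Command Complete header, is the status code for most commands.
 */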
3157 *status = skb->data[sizeof(*ev)];
3158
3159 skb_pull(skb, sizeof(*ev));
3160
3161 switch (*opcode) {
3162 case HCI_OP_INQUIRY_CANCEL:
3163 hci_cc_inquiry_cancel(hdev, skb, status);
3164 break;
3165
3166 case HCI_OP_PERIODIC_INQ:
3167 hci_cc_periodic_inq(hdev, skb);
3168 break;
3169
3170 case HCI_OP_EXIT_PERIODIC_INQ:
3171 hci_cc_exit_periodic_inq(hdev, skb);
3172 break;
3173
3174 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3175 hci_cc_remote_name_req_cancel(hdev, skb);
3176 break;
3177
3178 case HCI_OP_ROLE_DISCOVERY:
3179 hci_cc_role_discovery(hdev, skb);
3180 break;
3181
3182 case HCI_OP_READ_LINK_POLICY:
3183 hci_cc_read_link_policy(hdev, skb);
3184 break;
3185
3186 case HCI_OP_WRITE_LINK_POLICY:
3187 hci_cc_write_link_policy(hdev, skb);
3188 break;
3189
3190 case HCI_OP_READ_DEF_LINK_POLICY:
3191 hci_cc_read_def_link_policy(hdev, skb);
3192 break;
3193
3194 case HCI_OP_WRITE_DEF_LINK_POLICY:
3195 hci_cc_write_def_link_policy(hdev, skb);
3196 break;
3197
3198 case HCI_OP_RESET:
3199 hci_cc_reset(hdev, skb);
3200 break;
3201
3202 case HCI_OP_READ_STORED_LINK_KEY:
3203 hci_cc_read_stored_link_key(hdev, skb);
3204 break;
3205
3206 case HCI_OP_DELETE_STORED_LINK_KEY:
3207 hci_cc_delete_stored_link_key(hdev, skb);
3208 break;
3209
3210 case HCI_OP_WRITE_LOCAL_NAME:
3211 hci_cc_write_local_name(hdev, skb);
3212 break;
3213
3214 case HCI_OP_READ_LOCAL_NAME:
3215 hci_cc_read_local_name(hdev, skb);
3216 break;
3217
3218 case HCI_OP_WRITE_AUTH_ENABLE:
3219 hci_cc_write_auth_enable(hdev, skb);
3220 break;
3221
3222 case HCI_OP_WRITE_ENCRYPT_MODE:
3223 hci_cc_write_encrypt_mode(hdev, skb);
3224 break;
3225
3226 case HCI_OP_WRITE_SCAN_ENABLE:
3227 hci_cc_write_scan_enable(hdev, skb);
3228 break;
3229
3230 case HCI_OP_READ_CLASS_OF_DEV:
3231 hci_cc_read_class_of_dev(hdev, skb);
3232 break;
3233
3234 case HCI_OP_WRITE_CLASS_OF_DEV:
3235 hci_cc_write_class_of_dev(hdev, skb);
3236 break;
3237
3238 case HCI_OP_READ_VOICE_SETTING:
3239 hci_cc_read_voice_setting(hdev, skb);
3240 break;
3241
3242 case HCI_OP_WRITE_VOICE_SETTING:
3243 hci_cc_write_voice_setting(hdev, skb);
3244 break;
3245
3246 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3247 hci_cc_read_num_supported_iac(hdev, skb);
3248 break;
3249
3250 case HCI_OP_WRITE_SSP_MODE:
3251 hci_cc_write_ssp_mode(hdev, skb);
3252 break;
3253
3254 case HCI_OP_WRITE_SC_SUPPORT:
3255 hci_cc_write_sc_support(hdev, skb);
3256 break;
3257
3258 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3259 hci_cc_read_auth_payload_timeout(hdev, skb);
3260 break;
3261
3262 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3263 hci_cc_write_auth_payload_timeout(hdev, skb);
3264 break;
3265
3266 case HCI_OP_READ_LOCAL_VERSION:
3267 hci_cc_read_local_version(hdev, skb);
3268 break;
3269
3270 case HCI_OP_READ_LOCAL_COMMANDS:
3271 hci_cc_read_local_commands(hdev, skb);
3272 break;
3273
3274 case HCI_OP_READ_LOCAL_FEATURES:
3275 hci_cc_read_local_features(hdev, skb);
3276 break;
3277
3278 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3279 hci_cc_read_local_ext_features(hdev, skb);
3280 break;
3281
3282 case HCI_OP_READ_BUFFER_SIZE:
3283 hci_cc_read_buffer_size(hdev, skb);
3284 break;
3285
3286 case HCI_OP_READ_BD_ADDR:
3287 hci_cc_read_bd_addr(hdev, skb);
3288 break;
3289
3290 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3291 hci_cc_read_page_scan_activity(hdev, skb);
3292 break;
3293
3294 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3295 hci_cc_write_page_scan_activity(hdev, skb);
3296 break;
3297
3298 case HCI_OP_READ_PAGE_SCAN_TYPE:
3299 hci_cc_read_page_scan_type(hdev, skb);
3300 break;
3301
3302 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3303 hci_cc_write_page_scan_type(hdev, skb);
3304 break;
3305
3306 case HCI_OP_READ_DATA_BLOCK_SIZE:
3307 hci_cc_read_data_block_size(hdev, skb);
3308 break;
3309
3310 case HCI_OP_READ_FLOW_CONTROL_MODE:
3311 hci_cc_read_flow_control_mode(hdev, skb);
3312 break;
3313
3314 case HCI_OP_READ_LOCAL_AMP_INFO:
3315 hci_cc_read_local_amp_info(hdev, skb);
3316 break;
3317
3318 case HCI_OP_READ_CLOCK:
3319 hci_cc_read_clock(hdev, skb);
3320 break;
3321
3322 case HCI_OP_READ_INQ_RSP_TX_POWER:
3323 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3324 break;
3325
3326 case HCI_OP_PIN_CODE_REPLY:
3327 hci_cc_pin_code_reply(hdev, skb);
3328 break;
3329
3330 case HCI_OP_PIN_CODE_NEG_REPLY:
3331 hci_cc_pin_code_neg_reply(hdev, skb);
3332 break;
3333
3334 case HCI_OP_READ_LOCAL_OOB_DATA:
3335 hci_cc_read_local_oob_data(hdev, skb);
3336 break;
3337
3338 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3339 hci_cc_read_local_oob_ext_data(hdev, skb);
3340 break;
3341
3342 case HCI_OP_LE_READ_BUFFER_SIZE:
3343 hci_cc_le_read_buffer_size(hdev, skb);
3344 break;
3345
3346 case HCI_OP_LE_READ_LOCAL_FEATURES:
3347 hci_cc_le_read_local_features(hdev, skb);
3348 break;
3349
3350 case HCI_OP_LE_READ_ADV_TX_POWER:
3351 hci_cc_le_read_adv_tx_power(hdev, skb);
3352 break;
3353
3354 case HCI_OP_USER_CONFIRM_REPLY:
3355 hci_cc_user_confirm_reply(hdev, skb);
3356 break;
3357
3358 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3359 hci_cc_user_confirm_neg_reply(hdev, skb);
3360 break;
3361
3362 case HCI_OP_USER_PASSKEY_REPLY:
3363 hci_cc_user_passkey_reply(hdev, skb);
3364 break;
3365
3366 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3367 hci_cc_user_passkey_neg_reply(hdev, skb);
3368 break;
3369
3370 case HCI_OP_LE_SET_RANDOM_ADDR:
3371 hci_cc_le_set_random_addr(hdev, skb);
3372 break;
3373
3374 case HCI_OP_LE_SET_ADV_ENABLE:
3375 hci_cc_le_set_adv_enable(hdev, skb);
3376 break;
3377
3378 case HCI_OP_LE_SET_SCAN_PARAM:
3379 hci_cc_le_set_scan_param(hdev, skb);
3380 break;
3381
3382 case HCI_OP_LE_SET_SCAN_ENABLE:
3383 hci_cc_le_set_scan_enable(hdev, skb);
3384 break;
3385
3386 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3387 hci_cc_le_read_white_list_size(hdev, skb);
3388 break;
3389
3390 case HCI_OP_LE_CLEAR_WHITE_LIST:
3391 hci_cc_le_clear_white_list(hdev, skb);
3392 break;
3393
3394 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3395 hci_cc_le_add_to_white_list(hdev, skb);
3396 break;
3397
3398 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3399 hci_cc_le_del_from_white_list(hdev, skb);
3400 break;
3401
3402 case HCI_OP_LE_READ_SUPPORTED_STATES:
3403 hci_cc_le_read_supported_states(hdev, skb);
3404 break;
3405
3406 case HCI_OP_LE_READ_DEF_DATA_LEN:
3407 hci_cc_le_read_def_data_len(hdev, skb);
3408 break;
3409
3410 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3411 hci_cc_le_write_def_data_len(hdev, skb);
3412 break;
3413
3414 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3415 hci_cc_le_add_to_resolv_list(hdev, skb);
3416 break;
3417
3418 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3419 hci_cc_le_del_from_resolv_list(hdev, skb);
3420 break;
3421
3422 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3423 hci_cc_le_clear_resolv_list(hdev, skb);
3424 break;
3425
3426 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3427 hci_cc_le_read_resolv_list_size(hdev, skb);
3428 break;
3429
3430 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3431 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3432 break;
3433
3434 case HCI_OP_LE_READ_MAX_DATA_LEN:
3435 hci_cc_le_read_max_data_len(hdev, skb);
3436 break;
3437
3438 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3439 hci_cc_write_le_host_supported(hdev, skb);
3440 break;
3441
3442 case HCI_OP_LE_SET_ADV_PARAM:
3443 hci_cc_set_adv_param(hdev, skb);
3444 break;
3445
3446 case HCI_OP_READ_RSSI:
3447 hci_cc_read_rssi(hdev, skb);
3448 break;
3449
3450 case HCI_OP_READ_TX_POWER:
3451 hci_cc_read_tx_power(hdev, skb);
3452 break;
3453
3454 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3455 hci_cc_write_ssp_debug_mode(hdev, skb);
3456 break;
3457
3458 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3459 hci_cc_le_set_ext_scan_param(hdev, skb);
3460 break;
3461
3462 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3463 hci_cc_le_set_ext_scan_enable(hdev, skb);
3464 break;
3465
3466 case HCI_OP_LE_SET_DEFAULT_PHY:
3467 hci_cc_le_set_default_phy(hdev, skb);
3468 break;
3469
3470 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3471 hci_cc_le_read_num_adv_sets(hdev, skb);
3472 break;
3473
3474 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3475 hci_cc_set_ext_adv_param(hdev, skb);
3476 break;
3477
3478 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3479 hci_cc_le_set_ext_adv_enable(hdev, skb);
3480 break;
3481
3482 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3483 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3484 break;
3485
3486 default:
3487 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3488 break;
3489 }
3490
3491 if (*opcode != HCI_OP_NOP)
3492 cancel_delayed_work(&hdev->cmd_timer);
3493
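/* A non-zero Num_HCI_Command_Packets field restores our command
 * credit, except while a reset is in flight.
 */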
3494 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3495 atomic_set(&hdev->cmd_cnt, 1);
3496
3497 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3498 req_complete_skb);
3499
3500 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3501 bt_dev_err(hdev,
3502 "unexpected event for opcode 0x%4.4x", *opcode);
3503 return;
3504 }
3505
3506 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3507 queue_work(hdev->workqueue, &hdev->cmd_work);
3508 }
3509
3510 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3511 u16 *opcode, u8 *status,
3512 hci_req_complete_t *req_complete,
3513 hci_req_complete_skb_t *req_complete_skb)
3514 {
3515 struct hci_ev_cmd_status *ev = (void *) skb->data;
3516
3517 skb_pull(skb, sizeof(*ev));
3518
3519 *opcode = __le16_to_cpu(ev->opcode);
3520 *status = ev->status;
3521
3522 switch (*opcode) {
3523 case HCI_OP_INQUIRY:
3524 hci_cs_inquiry(hdev, ev->status);
3525 break;
3526
3527 case HCI_OP_CREATE_CONN:
3528 hci_cs_create_conn(hdev, ev->status);
3529 break;
3530
3531 case HCI_OP_DISCONNECT:
3532 hci_cs_disconnect(hdev, ev->status);
3533 break;
3534
3535 case HCI_OP_ADD_SCO:
3536 hci_cs_add_sco(hdev, ev->status);
3537 break;
3538
3539 case HCI_OP_AUTH_REQUESTED:
3540 hci_cs_auth_requested(hdev, ev->status);
3541 break;
3542
3543 case HCI_OP_SET_CONN_ENCRYPT:
3544 hci_cs_set_conn_encrypt(hdev, ev->status);
3545 break;
3546
3547 case HCI_OP_REMOTE_NAME_REQ:
3548 hci_cs_remote_name_req(hdev, ev->status);
3549 break;
3550
3551 case HCI_OP_READ_REMOTE_FEATURES:
3552 hci_cs_read_remote_features(hdev, ev->status);
3553 break;
3554
3555 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3556 hci_cs_read_remote_ext_features(hdev, ev->status);
3557 break;
3558
3559 case HCI_OP_SETUP_SYNC_CONN:
3560 hci_cs_setup_sync_conn(hdev, ev->status);
3561 break;
3562
3563 case HCI_OP_SNIFF_MODE:
3564 hci_cs_sniff_mode(hdev, ev->status);
3565 break;
3566
3567 case HCI_OP_EXIT_SNIFF_MODE:
3568 hci_cs_exit_sniff_mode(hdev, ev->status);
3569 break;
3570
3571 case HCI_OP_SWITCH_ROLE:
3572 hci_cs_switch_role(hdev, ev->status);
3573 break;
3574
3575 case HCI_OP_LE_CREATE_CONN:
3576 hci_cs_le_create_conn(hdev, ev->status);
3577 break;
3578
3579 case HCI_OP_LE_READ_REMOTE_FEATURES:
3580 hci_cs_le_read_remote_features(hdev, ev->status);
3581 break;
3582
3583 case HCI_OP_LE_START_ENC:
3584 hci_cs_le_start_enc(hdev, ev->status);
3585 break;
3586
3587 case HCI_OP_LE_EXT_CREATE_CONN:
3588 hci_cs_le_ext_create_conn(hdev, ev->status);
3589 break;
3590
3591 default:
3592 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3593 break;
3594 }
3595
3596 if (*opcode != HCI_OP_NOP)
3597 cancel_delayed_work(&hdev->cmd_timer);
3598
3599 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3600 atomic_set(&hdev->cmd_cnt, 1);
3601
3602 /* Indicate request completion if the command failed. Also, if
3603 * we're not waiting for a special event and we get a success
3604 * command status we should try to flag the request as completed
3605 * (since for this kind of command there will not be a command
3606 * complete event).
3607 */
3608 if (ev->status ||
3609 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3610 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3611 req_complete_skb);
3612
3613 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3614 bt_dev_err(hdev,
3615 "unexpected event for opcode 0x%4.4x", *opcode);
3616 return;
3617 }
3618
3619 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3620 queue_work(hdev->workqueue, &hdev->cmd_work);
3621 }
3622
3623 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3624 {
3625 struct hci_ev_hardware_error *ev = (void *) skb->data;
3626
3627 hdev->hw_error_code = ev->code;
3628
3629 queue_work(hdev->req_workqueue, &hdev->error_reset);
3630 }
3631
3632 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3633 {
3634 struct hci_ev_role_change *ev = (void *) skb->data;
3635 struct hci_conn *conn;
3636
3637 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3638
3639 hci_dev_lock(hdev);
3640
3641 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3642 if (conn) {
3643 if (!ev->status)
3644 conn->role = ev->role;
3645
3646 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3647
3648 hci_role_switch_cfm(conn, ev->status, ev->role);
3649 }
3650
3651 hci_dev_unlock(hdev);
3652 }
3653
3654 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3655 {
3656 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3657 int i;
3658
3659 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3660 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3661 return;
3662 }
3663
3664 if (skb->len < sizeof(*ev) ||
3665 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3666 BT_DBG("%s bad parameters", hdev->name);
3667 return;
3668 }
3669
3670 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3671
3672 for (i = 0; i < ev->num_hndl; i++) {
3673 struct hci_comp_pkts_info *info = &ev->handles[i];
3674 struct hci_conn *conn;
3675 __u16 handle, count;
3676
3677 handle = __le16_to_cpu(info->handle);
3678 count = __le16_to_cpu(info->count);
3679
3680 conn = hci_conn_hash_lookup_handle(hdev, handle);
3681 if (!conn)
3682 continue;
3683
3684 conn->sent -= count;
3685
3686 switch (conn->type) {
3687 case ACL_LINK:
3688 hdev->acl_cnt += count;
3689 if (hdev->acl_cnt > hdev->acl_pkts)
3690 hdev->acl_cnt = hdev->acl_pkts;
3691 break;
3692
3693 case LE_LINK:
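/* Controllers without a dedicated LE buffer pool report
 * le_pkts == 0 and share the ACL buffers instead.
 */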
3694 if (hdev->le_pkts) {
3695 hdev->le_cnt += count;
3696 if (hdev->le_cnt > hdev->le_pkts)
3697 hdev->le_cnt = hdev->le_pkts;
3698 } else {
3699 hdev->acl_cnt += count;
3700 if (hdev->acl_cnt > hdev->acl_pkts)
3701 hdev->acl_cnt = hdev->acl_pkts;
3702 }
3703 break;
3704
3705 case SCO_LINK:
3706 hdev->sco_cnt += count;
3707 if (hdev->sco_cnt > hdev->sco_pkts)
3708 hdev->sco_cnt = hdev->sco_pkts;
3709 break;
3710
3711 default:
3712 bt_dev_err(hdev, "unknown type %d conn %p",
3713 conn->type, conn);
3714 break;
3715 }
3716 }
3717
3718 queue_work(hdev->workqueue, &hdev->tx_work);
3719 }
3720
3721 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3722 __u16 handle)
3723 {
3724 struct hci_chan *chan;
3725
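/* AMP controllers identify logical links by channel handle rather
 * than by connection handle, so resolve through the hci_chan.
 */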
3726 switch (hdev->dev_type) {
3727 case HCI_PRIMARY:
3728 return hci_conn_hash_lookup_handle(hdev, handle);
3729 case HCI_AMP:
3730 chan = hci_chan_lookup_handle(hdev, handle);
3731 if (chan)
3732 return chan->conn;
3733 break;
3734 default:
3735 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3736 break;
3737 }
3738
3739 return NULL;
3740 }
3741
3742 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3743 {
3744 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3745 int i;
3746
3747 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3748 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3749 return;
3750 }
3751
3752 if (skb->len < sizeof(*ev) ||
3753 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3754 BT_DBG("%s bad parameters", hdev->name);
3755 return;
3756 }
3757
3758 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3759 ev->num_hndl);
3760
3761 for (i = 0; i < ev->num_hndl; i++) {
3762 struct hci_comp_blocks_info *info = &ev->handles[i];
3763 struct hci_conn *conn = NULL;
3764 __u16 handle, block_count;
3765
3766 handle = __le16_to_cpu(info->handle);
3767 block_count = __le16_to_cpu(info->blocks);
3768
3769 conn = __hci_conn_lookup_handle(hdev, handle);
3770 if (!conn)
3771 continue;
3772
3773 conn->sent -= block_count;
3774
3775 switch (conn->type) {
3776 case ACL_LINK:
3777 case AMP_LINK:
3778 hdev->block_cnt += block_count;
3779 if (hdev->block_cnt > hdev->num_blocks)
3780 hdev->block_cnt = hdev->num_blocks;
3781 break;
3782
3783 default:
3784 bt_dev_err(hdev, "unknown type %d conn %p",
3785 conn->type, conn);
3786 break;
3787 }
3788 }
3789
3790 queue_work(hdev->workqueue, &hdev->tx_work);
3791 }
3792
3793 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3794 {
3795 struct hci_ev_mode_change *ev = (void *) skb->data;
3796 struct hci_conn *conn;
3797
3798 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3799
3800 hci_dev_lock(hdev);
3801
3802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3803 if (conn) {
3804 conn->mode = ev->mode;
3805
3806 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3807 &conn->flags)) {
3808 if (conn->mode == HCI_CM_ACTIVE)
3809 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3810 else
3811 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3812 }
3813
3814 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3815 hci_sco_setup(conn, ev->status);
3816 }
3817
3818 hci_dev_unlock(hdev);
3819 }
3820
3821 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3822 {
3823 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3824 struct hci_conn *conn;
3825
3826 BT_DBG("%s", hdev->name);
3827
3828 hci_dev_lock(hdev);
3829
3830 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3831 if (!conn)
3832 goto unlock;
3833
3834 if (conn->state == BT_CONNECTED) {
3835 hci_conn_hold(conn);
3836 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3837 hci_conn_drop(conn);
3838 }
3839
3840 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3841 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3842 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3843 sizeof(ev->bdaddr), &ev->bdaddr);
3844 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3845 u8 secure;
3846
3847 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3848 secure = 1;
3849 else
3850 secure = 0;
3851
3852 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3853 }
3854
3855 unlock:
3856 hci_dev_unlock(hdev);
3857 }
3858
3859 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3860 {
3861 if (key_type == HCI_LK_CHANGED_COMBINATION)
3862 return;
3863
3864 conn->pin_length = pin_len;
3865 conn->key_type = key_type;
3866
3867 switch (key_type) {
3868 case HCI_LK_LOCAL_UNIT:
3869 case HCI_LK_REMOTE_UNIT:
3870 case HCI_LK_DEBUG_COMBINATION:
3871 return;
3872 case HCI_LK_COMBINATION:
3873 if (pin_len == 16)
3874 conn->pending_sec_level = BT_SECURITY_HIGH;
3875 else
3876 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3877 break;
3878 case HCI_LK_UNAUTH_COMBINATION_P192:
3879 case HCI_LK_UNAUTH_COMBINATION_P256:
3880 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3881 break;
3882 case HCI_LK_AUTH_COMBINATION_P192:
3883 conn->pending_sec_level = BT_SECURITY_HIGH;
3884 break;
3885 case HCI_LK_AUTH_COMBINATION_P256:
3886 conn->pending_sec_level = BT_SECURITY_FIPS;
3887 break;
3888 }
3889 }
3890
3891 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3892 {
3893 struct hci_ev_link_key_req *ev = (void *) skb->data;
3894 struct hci_cp_link_key_reply cp;
3895 struct hci_conn *conn;
3896 struct link_key *key;
3897
3898 BT_DBG("%s", hdev->name);
3899
3900 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3901 return;
3902
3903 hci_dev_lock(hdev);
3904
3905 key = hci_find_link_key(hdev, &ev->bdaddr);
3906 if (!key) {
3907 BT_DBG("%s link key not found for %pMR", hdev->name,
3908 &ev->bdaddr);
3909 goto not_found;
3910 }
3911
3912 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3913 &ev->bdaddr);
3914
3915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3916 if (conn) {
3917 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3918
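/* An auth_type with the MITM bit (0x01) set cannot be satisfied
 * by an unauthenticated combination key.
 */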
3919 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3920 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3921 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3922 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3923 goto not_found;
3924 }
3925
3926 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3927 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3928 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3929 BT_DBG("%s ignoring key unauthenticated for high security",
3930 hdev->name);
3931 goto not_found;
3932 }
3933
3934 conn_set_key(conn, key->type, key->pin_len);
3935 }
3936
3937 bacpy(&cp.bdaddr, &ev->bdaddr);
3938 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3939
3940 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3941
3942 hci_dev_unlock(hdev);
3943
3944 return;
3945
3946 not_found:
3947 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3948 hci_dev_unlock(hdev);
3949 }
3950
3951 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3952 {
3953 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3954 struct hci_conn *conn;
3955 struct link_key *key;
3956 bool persistent;
3957 u8 pin_len = 0;
3958
3959 BT_DBG("%s", hdev->name);
3960
3961 hci_dev_lock(hdev);
3962
3963 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3964 if (!conn)
3965 goto unlock;
3966
3967 /* Ignore NULL link key against CVE-2020-26555 */
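/* Note: crypto_memneq() is used rather than memcmp() since it compares
 * in constant time, avoiding a timing side channel when testing the
 * received key against ZERO_KEY.
 */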
3968 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
3969 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
3970 &ev->bdaddr);
3971 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3972 hci_conn_drop(conn);
3973 goto unlock;
3974 }
3975
3976 hci_conn_hold(conn);
3977 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3978 hci_conn_drop(conn);
3979
3980 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3981 conn_set_key(conn, ev->key_type, conn->pin_length);
3982
3983 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3984 goto unlock;
3985
3986 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3987 ev->key_type, pin_len, &persistent);
3988 if (!key)
3989 goto unlock;
3990
3991 /* Update connection information since adding the key will have
3992 * fixed up the type in the case of changed combination keys.
3993 */
3994 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3995 conn_set_key(conn, key->type, key->pin_len);
3996
3997 mgmt_new_link_key(hdev, key, persistent);
3998
3999 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4000 * is set. If it's not set simply remove the key from the kernel
4001 * list (we've still notified user space about it but with
4002 * store_hint being 0).
4003 */
4004 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4005 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4006 list_del_rcu(&key->list);
4007 kfree_rcu(key, rcu);
4008 goto unlock;
4009 }
4010
4011 if (persistent)
4012 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4013 else
4014 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4015
4016 unlock:
4017 hci_dev_unlock(hdev);
4018 }
4019
4020 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4021 {
4022 struct hci_ev_clock_offset *ev = (void *) skb->data;
4023 struct hci_conn *conn;
4024
4025 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4026
4027 hci_dev_lock(hdev);
4028
4029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4030 if (conn && !ev->status) {
4031 struct inquiry_entry *ie;
4032
4033 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4034 if (ie) {
4035 ie->data.clock_offset = ev->clock_offset;
4036 ie->timestamp = jiffies;
4037 }
4038 }
4039
4040 hci_dev_unlock(hdev);
4041 }
4042
4043 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4044 {
4045 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4046 struct hci_conn *conn;
4047
4048 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4049
4050 hci_dev_lock(hdev);
4051
4052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4053 if (conn && !ev->status)
4054 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4055
4056 hci_dev_unlock(hdev);
4057 }
4058
4059 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4060 {
4061 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4062 struct inquiry_entry *ie;
4063
4064 BT_DBG("%s", hdev->name);
4065
4066 hci_dev_lock(hdev);
4067
4068 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4069 if (ie) {
4070 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4071 ie->timestamp = jiffies;
4072 }
4073
4074 hci_dev_unlock(hdev);
4075 }
4076
4077 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4078 struct sk_buff *skb)
4079 {
4080 struct inquiry_data data;
4081 int num_rsp = *((__u8 *) skb->data);
4082
4083 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4084
4085 if (!num_rsp)
4086 return;
4087
4088 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4089 return;
4090
4091 hci_dev_lock(hdev);
4092
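/* Size-based dispatch (explanatory note): dividing the payload, minus the
 * leading num_rsp byte, by the number of responses reveals which report
 * layout the controller used. The variant that additionally carries a
 * pscan_mode byte is one byte larger per entry (15 vs. 14 bytes, going by
 * the struct definitions), so comparing the per-entry size is enough to
 * tell the two formats apart.
 */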
4093 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4094 struct inquiry_info_with_rssi_and_pscan_mode *info;
4095 info = (void *) (skb->data + 1);
4096
4097 if (skb->len < num_rsp * sizeof(*info) + 1)
4098 goto unlock;
4099
4100 for (; num_rsp; num_rsp--, info++) {
4101 u32 flags;
4102
4103 bacpy(&data.bdaddr, &info->bdaddr);
4104 data.pscan_rep_mode = info->pscan_rep_mode;
4105 data.pscan_period_mode = info->pscan_period_mode;
4106 data.pscan_mode = info->pscan_mode;
4107 memcpy(data.dev_class, info->dev_class, 3);
4108 data.clock_offset = info->clock_offset;
4109 data.rssi = info->rssi;
4110 data.ssp_mode = 0x00;
4111
4112 flags = hci_inquiry_cache_update(hdev, &data, false);
4113
4114 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4115 info->dev_class, info->rssi,
4116 flags, NULL, 0, NULL, 0);
4117 }
4118 } else {
4119 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4120
4121 if (skb->len < num_rsp * sizeof(*info) + 1)
4122 goto unlock;
4123
4124 for (; num_rsp; num_rsp--, info++) {
4125 u32 flags;
4126
4127 bacpy(&data.bdaddr, &info->bdaddr);
4128 data.pscan_rep_mode = info->pscan_rep_mode;
4129 data.pscan_period_mode = info->pscan_period_mode;
4130 data.pscan_mode = 0x00;
4131 memcpy(data.dev_class, info->dev_class, 3);
4132 data.clock_offset = info->clock_offset;
4133 data.rssi = info->rssi;
4134 data.ssp_mode = 0x00;
4135
4136 flags = hci_inquiry_cache_update(hdev, &data, false);
4137
4138 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4139 info->dev_class, info->rssi,
4140 flags, NULL, 0, NULL, 0);
4141 }
4142 }
4143
4144 unlock:
4145 hci_dev_unlock(hdev);
4146 }
4147
4148 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4149 struct sk_buff *skb)
4150 {
4151 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4152 struct hci_conn *conn;
4153
4154 BT_DBG("%s", hdev->name);
4155
4156 hci_dev_lock(hdev);
4157
4158 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4159 if (!conn)
4160 goto unlock;
4161
4162 if (ev->page < HCI_MAX_PAGES)
4163 memcpy(conn->features[ev->page], ev->features, 8);
4164
4165 if (!ev->status && ev->page == 0x01) {
4166 struct inquiry_entry *ie;
4167
4168 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4169 if (ie)
4170 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4171
4172 if (ev->features[0] & LMP_HOST_SSP) {
4173 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4174 } else {
4175 /* It is mandatory by the Bluetooth specification that
4176 * Extended Inquiry Results are only used when Secure
4177 * Simple Pairing is enabled, but some devices violate
4178 * this.
4179 *
4180 * To make these devices work, the internal SSP
4181 * enabled flag needs to be cleared if the remote host
4182 * features do not indicate SSP support */
4183 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4184 }
4185
4186 if (ev->features[0] & LMP_HOST_SC)
4187 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4188 }
4189
4190 if (conn->state != BT_CONFIG)
4191 goto unlock;
4192
4193 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4194 struct hci_cp_remote_name_req cp;
4195 memset(&cp, 0, sizeof(cp));
4196 bacpy(&cp.bdaddr, &conn->dst);
4197 cp.pscan_rep_mode = 0x02;
4198 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4199 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4200 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4201
4202 if (!hci_outgoing_auth_needed(hdev, conn)) {
4203 conn->state = BT_CONNECTED;
4204 hci_connect_cfm(conn, ev->status);
4205 hci_conn_drop(conn);
4206 }
4207
4208 unlock:
4209 hci_dev_unlock(hdev);
4210 }
4211
4212 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4213 struct sk_buff *skb)
4214 {
4215 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4216 struct hci_conn *conn;
4217
4218 switch (ev->link_type) {
4219 case SCO_LINK:
4220 case ESCO_LINK:
4221 break;
4222 default:
4223 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4224 * for HCI_Synchronous_Connection_Complete is limited to
4225 * either SCO or eSCO
4226 */
4227 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4228 return;
4229 }
4230
4231 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4232
4233 hci_dev_lock(hdev);
4234
4235 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4236 if (!conn) {
4237 if (ev->link_type == ESCO_LINK)
4238 goto unlock;
4239
4240 /* When the link type in the event indicates SCO connection
4241 * and lookup of the connection object fails, then check
4242 * if an eSCO connection object exists.
4243 *
4244 * The core limits the synchronous connections to either
4245 * SCO or eSCO. The eSCO connection is preferred and is
4246 * attempted first; until it has been successfully
4247 * established, the link type is hinted as eSCO.
4248 */
4249 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4250 if (!conn)
4251 goto unlock;
4252 }
4253
4254 switch (ev->status) {
4255 case 0x00:
4256 /* The synchronous connection complete event should only be
4257 * sent once per new connection. Receiving a successful
4258 * complete event when the connection status is already
4259 * BT_CONNECTED means that the device is misbehaving and sent
4260 * multiple complete event packets for the same new connection.
4261 *
4262 * Registering the device more than once can corrupt kernel
4263 * memory, hence upon detecting this invalid event, we report
4264 * an error and ignore the packet.
4265 */
4266 if (conn->state == BT_CONNECTED) {
4267 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4268 goto unlock;
4269 }
4270
4271 conn->handle = __le16_to_cpu(ev->handle);
4272 conn->state = BT_CONNECTED;
4273 conn->type = ev->link_type;
4274
4275 hci_debugfs_create_conn(conn);
4276 hci_conn_add_sysfs(conn);
4277 break;
4278
4279 case 0x10: /* Connection Accept Timeout */
4280 case 0x0d: /* Connection Rejected due to Limited Resources */
4281 case 0x11: /* Unsupported Feature or Parameter Value */
4282 case 0x1c: /* SCO interval rejected */
4283 case 0x1a: /* Unsupported Remote Feature */
4284 case 0x1e: /* Invalid LMP Parameters */
4285 case 0x1f: /* Unspecified error */
4286 case 0x20: /* Unsupported LMP Parameter value */
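		/* For the error codes above the controller or remote side
		 * most likely rejected the requested eSCO parameters, so
		 * for outgoing connections the packet type mask is reset
		 * and hci_setup_sync() retries the setup, typically with
		 * a more conservative parameter set.
		 */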
4287 if (conn->out) {
4288 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4289 (hdev->esco_type & EDR_ESCO_MASK);
4290 if (hci_setup_sync(conn, conn->link->handle))
4291 goto unlock;
4292 }
4293 /* fall through */
4294
4295 default:
4296 conn->state = BT_CLOSED;
4297 break;
4298 }
4299
4300 hci_connect_cfm(conn, ev->status);
4301 if (ev->status)
4302 hci_conn_del(conn);
4303
4304 unlock:
4305 hci_dev_unlock(hdev);
4306 }
4307
4308 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4309 {
4310 size_t parsed = 0;
4311
4312 while (parsed < eir_len) {
4313 u8 field_len = eir[0];
4314
4315 if (field_len == 0)
4316 return parsed;
4317
4318 parsed += field_len + 1;
4319 eir += field_len + 1;
4320 }
4321
4322 return eir_len;
4323 }
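
/* Worked example (sketch): EIR data is a sequence of length-prefixed
 * structures, <len><type><data...>, where len counts the type byte plus
 * the data. For the buffer
 *
 *   02 01 06  06 09 'B' 'l' 'u' 'e' 'Z'  00 ...
 *
 * the first structure is Flags (2+1 bytes), the second the Complete
 * Local Name "BlueZ" (6+1 bytes), and the zero length terminates the
 * walk, so eir_get_length() returns 10.
 */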
4324
4325 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4326 struct sk_buff *skb)
4327 {
4328 struct inquiry_data data;
4329 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4330 int num_rsp = *((__u8 *) skb->data);
4331 size_t eir_len;
4332
4333 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4334
4335 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4336 return;
4337
4338 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4339 return;
4340
4341 hci_dev_lock(hdev);
4342
4343 for (; num_rsp; num_rsp--, info++) {
4344 u32 flags;
4345 bool name_known;
4346
4347 bacpy(&data.bdaddr, &info->bdaddr);
4348 data.pscan_rep_mode = info->pscan_rep_mode;
4349 data.pscan_period_mode = info->pscan_period_mode;
4350 data.pscan_mode = 0x00;
4351 memcpy(data.dev_class, info->dev_class, 3);
4352 data.clock_offset = info->clock_offset;
4353 data.rssi = info->rssi;
4354 data.ssp_mode = 0x01;
4355
4356 if (hci_dev_test_flag(hdev, HCI_MGMT))
4357 name_known = eir_get_data(info->data,
4358 sizeof(info->data),
4359 EIR_NAME_COMPLETE, NULL);
4360 else
4361 name_known = true;
4362
4363 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4364
4365 eir_len = eir_get_length(info->data, sizeof(info->data));
4366
4367 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4368 info->dev_class, info->rssi,
4369 flags, info->data, eir_len, NULL, 0);
4370 }
4371
4372 hci_dev_unlock(hdev);
4373 }
4374
4375 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4376 struct sk_buff *skb)
4377 {
4378 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4379 struct hci_conn *conn;
4380
4381 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4382 __le16_to_cpu(ev->handle));
4383
4384 hci_dev_lock(hdev);
4385
4386 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4387 if (!conn)
4388 goto unlock;
4389
4390 /* For BR/EDR the necessary steps are taken through the
4391 * auth_complete event.
4392 */
4393 if (conn->type != LE_LINK)
4394 goto unlock;
4395
4396 if (!ev->status)
4397 conn->sec_level = conn->pending_sec_level;
4398
4399 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4400
4401 if (ev->status && conn->state == BT_CONNECTED) {
4402 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4403 hci_conn_drop(conn);
4404 goto unlock;
4405 }
4406
4407 if (conn->state == BT_CONFIG) {
4408 if (!ev->status)
4409 conn->state = BT_CONNECTED;
4410
4411 hci_connect_cfm(conn, ev->status);
4412 hci_conn_drop(conn);
4413 } else {
4414 hci_auth_cfm(conn, ev->status);
4415
4416 hci_conn_hold(conn);
4417 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4418 hci_conn_drop(conn);
4419 }
4420
4421 unlock:
4422 hci_dev_unlock(hdev);
4423 }
4424
4425 static u8 hci_get_auth_req(struct hci_conn *conn)
4426 {
4427 /* If remote requests no-bonding, follow that lead */
4428 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4429 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4430 return conn->remote_auth | (conn->auth_type & 0x01);
4431
4432 /* If both remote and local have enough IO capabilities, require
4433 * MITM protection
4434 */
4435 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4436 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4437 return conn->remote_auth | 0x01;
4438
4439 /* No MITM protection possible so ignore remote requirement */
4440 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4441 }
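
/* Background note for the bit fiddling above: the HCI authentication
 * requirement values encode the MITM requirement in bit 0 (for example
 * HCI_AT_NO_BONDING is 0x00 and HCI_AT_NO_BONDING_MITM is 0x01, and
 * likewise for the dedicated and general bonding pairs). Masking with
 * ~0x01 therefore strips the MITM bit, while OR-ing 0x01 requests MITM
 * protection.
 */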
4442
4443 static u8 bredr_oob_data_present(struct hci_conn *conn)
4444 {
4445 struct hci_dev *hdev = conn->hdev;
4446 struct oob_data *data;
4447
4448 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4449 if (!data)
4450 return 0x00;
4451
4452 if (bredr_sc_enabled(hdev)) {
4453 /* When Secure Connections is enabled, then just
4454 * return the present value stored with the OOB
4455 * data. The stored value contains the right present
4456 * information. However, it can only be trusted when
4457 * not in Secure Connections Only mode.
4458 */
4459 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4460 return data->present;
4461
4462 /* When Secure Connections Only mode is enabled, then
4463 * the P-256 values are required. If they are not
4464 * available, then do not declare that OOB data is
4465 * present.
4466 */
4467 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4468 !crypto_memneq(data->hash256, ZERO_KEY, 16))
4469 return 0x00;
4470
4471 return 0x02;
4472 }
4473
4474 /* When Secure Connections is not enabled or actually
4475 * not supported by the hardware, then check if the
4476 * P-192 data values are present.
4477 */
4478 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4479 !crypto_memneq(data->hash192, ZERO_KEY, 16))
4480 return 0x00;
4481
4482 return 0x01;
4483 }
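
/* The value returned above ends up in the OOB_Data_Present field of the
 * IO Capability Reply: 0x00 means no OOB data, 0x01 P-192 data only and
 * 0x02 P-256 data only (0x03, both sets, is never produced by this
 * helper).
 */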
4484
4485 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4486 {
4487 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4488 struct hci_conn *conn;
4489
4490 BT_DBG("%s", hdev->name);
4491
4492 hci_dev_lock(hdev);
4493
4494 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4495 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4496 goto unlock;
4497
4498 /* Assume remote supports SSP since it has triggered this event */
4499 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4500
4501 hci_conn_hold(conn);
4502
4503 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4504 goto unlock;
4505
4506 /* Allow pairing if we're bondable, if we initiated the
4507 * pairing, or if the remote is not requesting bonding.
4508 */
4509 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4510 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4511 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4512 struct hci_cp_io_capability_reply cp;
4513
4514 bacpy(&cp.bdaddr, &ev->bdaddr);
4515 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4516 * as KeyboardDisplay is not defined for BR/EDR by the BT spec. */
4517 cp.capability = (conn->io_capability == 0x04) ?
4518 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4519
4520 /* If we are initiators, there is no remote information yet */
4521 if (conn->remote_auth == 0xff) {
4522 /* Request MITM protection if our IO caps allow it
4523 * except for the no-bonding case.
4524 */
4525 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4526 conn->auth_type != HCI_AT_NO_BONDING)
4527 conn->auth_type |= 0x01;
4528 } else {
4529 conn->auth_type = hci_get_auth_req(conn);
4530 }
4531
4532 /* If we're not bondable, force one of the non-bondable
4533 * authentication requirement values.
4534 */
4535 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4536 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4537
4538 cp.authentication = conn->auth_type;
4539 cp.oob_data = bredr_oob_data_present(conn);
4540
4541 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4542 sizeof(cp), &cp);
4543 } else {
4544 struct hci_cp_io_capability_neg_reply cp;
4545
4546 bacpy(&cp.bdaddr, &ev->bdaddr);
4547 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4548
4549 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4550 sizeof(cp), &cp);
4551 }
4552
4553 unlock:
4554 hci_dev_unlock(hdev);
4555 }
4556
4557 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4558 {
4559 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4560 struct hci_conn *conn;
4561
4562 BT_DBG("%s", hdev->name);
4563
4564 hci_dev_lock(hdev);
4565
4566 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4567 if (!conn)
4568 goto unlock;
4569
4570 conn->remote_cap = ev->capability;
4571 conn->remote_auth = ev->authentication;
4572
4573 unlock:
4574 hci_dev_unlock(hdev);
4575 }
4576
4577 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4578 struct sk_buff *skb)
4579 {
4580 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4581 int loc_mitm, rem_mitm, confirm_hint = 0;
4582 struct hci_conn *conn;
4583
4584 BT_DBG("%s", hdev->name);
4585
4586 hci_dev_lock(hdev);
4587
4588 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4589 goto unlock;
4590
4591 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4592 if (!conn)
4593 goto unlock;
4594
4595 loc_mitm = (conn->auth_type & 0x01);
4596 rem_mitm = (conn->remote_auth & 0x01);
4597
4598 /* If we require MITM but the remote device can't provide that
4599 * (it has NoInputNoOutput) then reject the confirmation
4600 * request. We check the security level here since it doesn't
4601 * necessarily match conn->auth_type.
4602 */
4603 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4604 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4605 BT_DBG("Rejecting request: remote device can't provide MITM");
4606 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4607 sizeof(ev->bdaddr), &ev->bdaddr);
4608 goto unlock;
4609 }
4610
4611 /* If no side requires MITM protection, auto-accept */
4612 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4613 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4614
4615 /* If we're not the initiators, request authorization from
4616 * user space to proceed (mgmt_user_confirm with
4617 * confirm_hint set to 1). The exception is if neither
4618 * side requires MITM or if the local IO capability is
4619 * NoInputNoOutput, in which case we auto-accept.
4620 */
4621 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4622 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4623 (loc_mitm || rem_mitm)) {
4624 BT_DBG("Confirming auto-accept as acceptor");
4625 confirm_hint = 1;
4626 goto confirm;
4627 }
4628
4629 BT_DBG("Auto-accept of user confirmation with %ums delay",
4630 hdev->auto_accept_delay);
4631
4632 if (hdev->auto_accept_delay > 0) {
4633 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4634 queue_delayed_work(conn->hdev->workqueue,
4635 &conn->auto_accept_work, delay);
4636 goto unlock;
4637 }
4638
4639 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4640 sizeof(ev->bdaddr), &ev->bdaddr);
4641 goto unlock;
4642 }
4643
4644 confirm:
4645 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4646 le32_to_cpu(ev->passkey), confirm_hint);
4647
4648 unlock:
4649 hci_dev_unlock(hdev);
4650 }
4651
4652 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4653 struct sk_buff *skb)
4654 {
4655 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4656
4657 BT_DBG("%s", hdev->name);
4658
4659 if (hci_dev_test_flag(hdev, HCI_MGMT))
4660 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4661 }
4662
4663 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4664 struct sk_buff *skb)
4665 {
4666 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4667 struct hci_conn *conn;
4668
4669 BT_DBG("%s", hdev->name);
4670
4671 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4672 if (!conn)
4673 return;
4674
4675 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4676 conn->passkey_entered = 0;
4677
4678 if (hci_dev_test_flag(hdev, HCI_MGMT))
4679 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4680 conn->dst_type, conn->passkey_notify,
4681 conn->passkey_entered);
4682 }
4683
4684 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4685 {
4686 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4687 struct hci_conn *conn;
4688
4689 BT_DBG("%s", hdev->name);
4690
4691 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4692 if (!conn)
4693 return;
4694
4695 switch (ev->type) {
4696 case HCI_KEYPRESS_STARTED:
4697 conn->passkey_entered = 0;
4698 return;
4699
4700 case HCI_KEYPRESS_ENTERED:
4701 conn->passkey_entered++;
4702 break;
4703
4704 case HCI_KEYPRESS_ERASED:
4705 conn->passkey_entered--;
4706 break;
4707
4708 case HCI_KEYPRESS_CLEARED:
4709 conn->passkey_entered = 0;
4710 break;
4711
4712 case HCI_KEYPRESS_COMPLETED:
4713 return;
4714 }
4715
4716 if (hci_dev_test_flag(hdev, HCI_MGMT))
4717 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4718 conn->dst_type, conn->passkey_notify,
4719 conn->passkey_entered);
4720 }
4721
4722 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4723 struct sk_buff *skb)
4724 {
4725 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4726 struct hci_conn *conn;
4727
4728 BT_DBG("%s", hdev->name);
4729
4730 hci_dev_lock(hdev);
4731
4732 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4733 if (!conn || !hci_conn_ssp_enabled(conn))
4734 goto unlock;
4735
4736 /* Reset the authentication requirement to unknown */
4737 conn->remote_auth = 0xff;
4738
4739 /* To avoid duplicate auth_failed events to user space we check
4740 * the HCI_CONN_AUTH_PEND flag, which will be set if we
4741 * initiated the authentication. A traditional auth_complete
4742 * event is always produced as initiator and is also mapped to
4743 * the mgmt_auth_failed event */
4744 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4745 mgmt_auth_failed(conn, ev->status);
4746
4747 hci_conn_drop(conn);
4748
4749 unlock:
4750 hci_dev_unlock(hdev);
4751 }
4752
4753 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4754 struct sk_buff *skb)
4755 {
4756 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4757 struct inquiry_entry *ie;
4758 struct hci_conn *conn;
4759
4760 BT_DBG("%s", hdev->name);
4761
4762 hci_dev_lock(hdev);
4763
4764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4765 if (conn)
4766 memcpy(conn->features[1], ev->features, 8);
4767
4768 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4769 if (ie)
4770 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4771
4772 hci_dev_unlock(hdev);
4773 }
4774
4775 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4776 struct sk_buff *skb)
4777 {
4778 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4779 struct oob_data *data;
4780
4781 BT_DBG("%s", hdev->name);
4782
4783 hci_dev_lock(hdev);
4784
4785 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4786 goto unlock;
4787
4788 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4789 if (!data) {
4790 struct hci_cp_remote_oob_data_neg_reply cp;
4791
4792 bacpy(&cp.bdaddr, &ev->bdaddr);
4793 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4794 sizeof(cp), &cp);
4795 goto unlock;
4796 }
4797
4798 if (bredr_sc_enabled(hdev)) {
4799 struct hci_cp_remote_oob_ext_data_reply cp;
4800
4801 bacpy(&cp.bdaddr, &ev->bdaddr);
4802 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4803 memset(cp.hash192, 0, sizeof(cp.hash192));
4804 memset(cp.rand192, 0, sizeof(cp.rand192));
4805 } else {
4806 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4807 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4808 }
4809 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4810 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4811
4812 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4813 sizeof(cp), &cp);
4814 } else {
4815 struct hci_cp_remote_oob_data_reply cp;
4816
4817 bacpy(&cp.bdaddr, &ev->bdaddr);
4818 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4819 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4820
4821 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4822 sizeof(cp), &cp);
4823 }
4824
4825 unlock:
4826 hci_dev_unlock(hdev);
4827 }
4828
4829 #if IS_ENABLED(CONFIG_BT_HS)
4830 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4831 {
4832 struct hci_ev_channel_selected *ev = (void *)skb->data;
4833 struct hci_conn *hcon;
4834
4835 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4836
4837 skb_pull(skb, sizeof(*ev));
4838
4839 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4840 if (!hcon)
4841 return;
4842
4843 amp_read_loc_assoc_final_data(hdev, hcon);
4844 }
4845
4846 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4847 struct sk_buff *skb)
4848 {
4849 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4850 struct hci_conn *hcon, *bredr_hcon;
4851
4852 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4853 ev->status);
4854
4855 hci_dev_lock(hdev);
4856
4857 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4858 if (!hcon) {
4859 hci_dev_unlock(hdev);
4860 return;
4861 }
4862
4863 if (!hcon->amp_mgr) {
4864 hci_dev_unlock(hdev);
4865 return;
4866 }
4867
4868 if (ev->status) {
4869 hci_conn_del(hcon);
4870 hci_dev_unlock(hdev);
4871 return;
4872 }
4873
4874 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4875
4876 hcon->state = BT_CONNECTED;
4877 bacpy(&hcon->dst, &bredr_hcon->dst);
4878
4879 hci_conn_hold(hcon);
4880 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4881 hci_conn_drop(hcon);
4882
4883 hci_debugfs_create_conn(hcon);
4884 hci_conn_add_sysfs(hcon);
4885
4886 amp_physical_cfm(bredr_hcon, hcon);
4887
4888 hci_dev_unlock(hdev);
4889 }
4890
4891 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4892 {
4893 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4894 struct hci_conn *hcon;
4895 struct hci_chan *hchan;
4896 struct amp_mgr *mgr;
4897
4898 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4899 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4900 ev->status);
4901
4902 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4903 if (!hcon)
4904 return;
4905
4906 /* Create AMP hchan */
4907 hchan = hci_chan_create(hcon);
4908 if (!hchan)
4909 return;
4910
4911 hchan->handle = le16_to_cpu(ev->handle);
4912 hchan->amp = true;
4913
4914 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4915
4916 mgr = hcon->amp_mgr;
4917 if (mgr && mgr->bredr_chan) {
4918 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4919
4920 l2cap_chan_lock(bredr_chan);
4921
4922 bredr_chan->conn->mtu = hdev->block_mtu;
4923 l2cap_logical_cfm(bredr_chan, hchan, 0);
4924 hci_conn_hold(hcon);
4925
4926 l2cap_chan_unlock(bredr_chan);
4927 }
4928 }
4929
4930 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4931 struct sk_buff *skb)
4932 {
4933 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4934 struct hci_chan *hchan;
4935
4936 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4937 le16_to_cpu(ev->handle), ev->status);
4938
4939 if (ev->status)
4940 return;
4941
4942 hci_dev_lock(hdev);
4943
4944 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4945 if (!hchan || !hchan->amp)
4946 goto unlock;
4947
4948 amp_destroy_logical_link(hchan, ev->reason);
4949
4950 unlock:
4951 hci_dev_unlock(hdev);
4952 }
4953
4954 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4955 struct sk_buff *skb)
4956 {
4957 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4958 struct hci_conn *hcon;
4959
4960 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4961
4962 if (ev->status)
4963 return;
4964
4965 hci_dev_lock(hdev);
4966
4967 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4968 if (hcon && hcon->type == AMP_LINK) {
4969 hcon->state = BT_CLOSED;
4970 hci_disconn_cfm(hcon, ev->reason);
4971 hci_conn_del(hcon);
4972 }
4973
4974 hci_dev_unlock(hdev);
4975 }
4976 #endif
4977
4978 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
4979 u8 bdaddr_type, bdaddr_t *local_rpa)
4980 {
4981 if (conn->out) {
4982 conn->dst_type = bdaddr_type;
4983 conn->resp_addr_type = bdaddr_type;
4984 bacpy(&conn->resp_addr, bdaddr);
4985
4986 /* If the controller has set a Local RPA, then it must be
4987 * used instead of hdev->rpa.
4988 */
4989 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
4990 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4991 bacpy(&conn->init_addr, local_rpa);
4992 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
4993 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4994 bacpy(&conn->init_addr, &conn->hdev->rpa);
4995 } else {
4996 hci_copy_identity_address(conn->hdev, &conn->init_addr,
4997 &conn->init_addr_type);
4998 }
4999 } else {
5000 conn->resp_addr_type = conn->hdev->adv_addr_type;
5001 /* If the controller has set a Local RPA, then it must be
5002 * used instead of hdev->rpa.
5003 */
5004 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5005 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5006 bacpy(&conn->resp_addr, local_rpa);
5007 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5008 /* In case of ext adv, resp_addr will be updated in
5009 * Adv Terminated event.
5010 */
5011 if (!ext_adv_capable(conn->hdev))
5012 bacpy(&conn->resp_addr,
5013 &conn->hdev->random_addr);
5014 } else {
5015 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5016 }
5017
5018 conn->init_addr_type = bdaddr_type;
5019 bacpy(&conn->init_addr, bdaddr);
5020
5021 /* For incoming connections, set the default minimum
5022 * and maximum connection interval. They will be used
5023 * to check if the parameters are in range and if not
5024 * trigger the connection update procedure.
5025 */
5026 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5027 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5028 }
5029 }
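
/* In short: for outgoing connections the peer is the responder and our
 * own (possibly RPA) address is the initiator address; for incoming
 * connections the roles flip and the responder address is whatever we
 * advertised with. Keeping the init/resp addresses accurate matters
 * because SMP later uses them in the pairing confirm calculations.
 */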
5030
5031 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5032 bdaddr_t *bdaddr, u8 bdaddr_type,
5033 bdaddr_t *local_rpa, u8 role, u16 handle,
5034 u16 interval, u16 latency,
5035 u16 supervision_timeout)
5036 {
5037 struct hci_conn_params *params;
5038 struct hci_conn *conn;
5039 struct smp_irk *irk;
5040 u8 addr_type;
5041
5042 hci_dev_lock(hdev);
5043
5044 /* All controllers implicitly stop advertising in the event of a
5045 * connection, so ensure that the state bit is cleared.
5046 */
5047 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5048
5049 conn = hci_lookup_le_connect(hdev);
5050 if (!conn) {
5051 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5052 if (!conn) {
5053 bt_dev_err(hdev, "no memory for new connection");
5054 goto unlock;
5055 }
5056
5057 conn->dst_type = bdaddr_type;
5058
5059 /* If we didn't have a hci_conn object previously
5060 * but we're in master role, this must be something
5061 * initiated using a white list. Since white list based
5062 * connections are not "first class citizens" we don't
5063 * have full tracking of them. Therefore, we go ahead
5064 * with a "best effort" approach of determining the
5065 * initiator address based on the HCI_PRIVACY flag.
5066 */
5067 if (conn->out) {
5068 conn->resp_addr_type = bdaddr_type;
5069 bacpy(&conn->resp_addr, bdaddr);
5070 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5071 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5072 bacpy(&conn->init_addr, &hdev->rpa);
5073 } else {
5074 hci_copy_identity_address(hdev,
5075 &conn->init_addr,
5076 &conn->init_addr_type);
5077 }
5078 }
5079 } else {
5080 cancel_delayed_work(&conn->le_conn_timeout);
5081 }
5082
5083 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5084
5085 /* Lookup the identity address from the stored connection
5086 * address and address type.
5087 *
5088 * When establishing connections to an identity address, the
5089 * connection procedure will store the resolvable random
5090 * address first. Now if it can be converted back into the
5091 * identity address, start using the identity address from
5092 * now on.
5093 */
5094 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5095 if (irk) {
5096 bacpy(&conn->dst, &irk->bdaddr);
5097 conn->dst_type = irk->addr_type;
5098 }
5099
5100 if (status) {
5101 hci_le_conn_failed(conn, status);
5102 goto unlock;
5103 }
5104
5105 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5106 addr_type = BDADDR_LE_PUBLIC;
5107 else
5108 addr_type = BDADDR_LE_RANDOM;
5109
5110 /* Drop the connection if the device is blocked */
5111 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5112 hci_conn_drop(conn);
5113 goto unlock;
5114 }
5115
5116 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5117 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5118
5119 conn->sec_level = BT_SECURITY_LOW;
5120 conn->handle = handle;
5121 conn->state = BT_CONFIG;
5122
5123 conn->le_conn_interval = interval;
5124 conn->le_conn_latency = latency;
5125 conn->le_supv_timeout = supervision_timeout;
5126
5127 hci_debugfs_create_conn(conn);
5128 hci_conn_add_sysfs(conn);
5129
5130 /* The remote features procedure is defined for the master
5131 * role only. So request the remote features only in case
5132 * of an initiated (outgoing) connection.
5133 *
5134 * If the local controller supports slave-initiated features
5135 * exchange, then requesting the remote features in slave
5136 * role is possible. Otherwise just transition into the
5137 * connected state without requesting the remote features.
5138 */
5139 if (conn->out ||
5140 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5141 struct hci_cp_le_read_remote_features cp;
5142
5143 cp.handle = __cpu_to_le16(conn->handle);
5144
5145 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5146 sizeof(cp), &cp);
5147
5148 hci_conn_hold(conn);
5149 } else {
5150 conn->state = BT_CONNECTED;
5151 hci_connect_cfm(conn, status);
5152 }
5153
5154 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5155 conn->dst_type);
5156 if (params) {
5157 list_del_init(&params->action);
5158 if (params->conn) {
5159 hci_conn_drop(params->conn);
5160 hci_conn_put(params->conn);
5161 params->conn = NULL;
5162 }
5163 }
5164
5165 unlock:
5166 hci_update_background_scan(hdev);
5167 hci_dev_unlock(hdev);
5168 }
5169
5170 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5171 {
5172 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5173
5174 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5175
5176 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5177 NULL, ev->role, le16_to_cpu(ev->handle),
5178 le16_to_cpu(ev->interval),
5179 le16_to_cpu(ev->latency),
5180 le16_to_cpu(ev->supervision_timeout));
5181 }
5182
5183 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5184 struct sk_buff *skb)
5185 {
5186 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5187
5188 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5189
5190 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5191 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5192 le16_to_cpu(ev->interval),
5193 le16_to_cpu(ev->latency),
5194 le16_to_cpu(ev->supervision_timeout));
5195 }
5196
5197 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5198 {
5199 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5200 struct hci_conn *conn;
5201
5202 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5203
5204 if (ev->status) {
5205 struct adv_info *adv;
5206
5207 adv = hci_find_adv_instance(hdev, ev->handle);
5208 if (!adv)
5209 return;
5210
5211 /* Remove advertising as it has been terminated */
5212 hci_remove_adv_instance(hdev, ev->handle);
5213 mgmt_advertising_removed(NULL, hdev, ev->handle);
5214
5215 return;
5216 }
5217
5218 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5219 if (conn) {
5220 struct adv_info *adv_instance;
5221
5222 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5223 bacmp(&conn->resp_addr, BDADDR_ANY))
5224 return;
5225
5226 if (!hdev->cur_adv_instance) {
5227 bacpy(&conn->resp_addr, &hdev->random_addr);
5228 return;
5229 }
5230
5231 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5232 if (adv_instance)
5233 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5234 }
5235 }
5236
5237 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5238 struct sk_buff *skb)
5239 {
5240 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5241 struct hci_conn *conn;
5242
5243 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5244
5245 if (ev->status)
5246 return;
5247
5248 hci_dev_lock(hdev);
5249
5250 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5251 if (conn) {
5252 conn->le_conn_interval = le16_to_cpu(ev->interval);
5253 conn->le_conn_latency = le16_to_cpu(ev->latency);
5254 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5255 }
5256
5257 hci_dev_unlock(hdev);
5258 }
5259
5260 /* This function requires the caller holds hdev->lock */
5261 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5262 bdaddr_t *addr,
5263 u8 addr_type, u8 adv_type,
5264 bdaddr_t *direct_rpa)
5265 {
5266 struct hci_conn *conn;
5267 struct hci_conn_params *params;
5268
5269 /* If the event is not connectable don't proceed further */
5270 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5271 return NULL;
5272
5273 /* Ignore if the device is blocked */
5274 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5275 return NULL;
5276
5277 /* Most controllers will fail if we try to create new connections
5278 * while we have an existing one in slave role.
5279 */
5280 if (hdev->conn_hash.le_num_slave > 0)
5281 return NULL;
5282
5283 /* If we're not connectable, only connect to devices that are
5284 * in our pend_le_conns list.
5285 */
5286 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5287 addr_type);
5288 if (!params)
5289 return NULL;
5290
5291 if (!params->explicit_connect) {
5292 switch (params->auto_connect) {
5293 case HCI_AUTO_CONN_DIRECT:
5294 /* Only devices advertising with ADV_DIRECT_IND trigger a
5295 * connection attempt. This allows incoming connections
5296 * from slave devices.
5297 */
5298 if (adv_type != LE_ADV_DIRECT_IND)
5299 return NULL;
5300 break;
5301 case HCI_AUTO_CONN_ALWAYS:
5302 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5303 * trigger a connection attempt. This means that
5304 * incoming connections from slave devices are
5305 * accepted and also outgoing connections to slave
5306 * devices are established when found.
5307 */
5308 break;
5309 default:
5310 return NULL;
5311 }
5312 }
5313
5314 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5315 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5316 direct_rpa);
5317 if (!IS_ERR(conn)) {
5318 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5319 * by the higher layer that tried to connect; if not, then
5320 * store the pointer since we don't really have any
5321 * other owner of the object besides the params that
5322 * triggered it. This way we can abort the connection if
5323 * the parameters get removed and keep the reference
5324 * count consistent once the connection is established.
5325 */
5326
5327 if (!params->explicit_connect)
5328 params->conn = hci_conn_get(conn);
5329
5330 return conn;
5331 }
5332
5333 switch (PTR_ERR(conn)) {
5334 case -EBUSY:
5335 /* If hci_connect() returns -EBUSY it means there is already
5336 * an LE connection attempt going on. Since controllers don't
5337 * support more than one connection attempt at a time, we
5338 * don't consider this an error case.
5339 */
5340 break;
5341 default:
5342 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5343 return NULL;
5344 }
5345
5346 return NULL;
5347 }
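
/* Policy summary (derived from the checks above): HCI_AUTO_CONN_DIRECT
 * only reacts to ADV_DIRECT_IND, i.e. the peer explicitly addressing us,
 * while HCI_AUTO_CONN_ALWAYS also connects on plain ADV_IND. Explicitly
 * requested connections (params->explicit_connect) bypass the
 * auto_connect check since user space already asked for them.
 */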
5348
5349 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5350 u8 bdaddr_type, bdaddr_t *direct_addr,
5351 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5352 bool ext_adv)
5353 {
5354 struct discovery_state *d = &hdev->discovery;
5355 struct smp_irk *irk;
5356 struct hci_conn *conn;
5357 bool match;
5358 u32 flags;
5359 u8 *ptr, real_len;
5360
5361 switch (type) {
5362 case LE_ADV_IND:
5363 case LE_ADV_DIRECT_IND:
5364 case LE_ADV_SCAN_IND:
5365 case LE_ADV_NONCONN_IND:
5366 case LE_ADV_SCAN_RSP:
5367 break;
5368 default:
5369 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5370 "type: 0x%02x", type);
5371 return;
5372 }
5373
5374 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5375 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5376 return;
5377 }
5378
5379 /* Find the end of the data in case the report contains padded zero
5380 * bytes at the end causing an invalid length value.
5381 *
5382 * When data is NULL, len is 0 so there is no need for extra ptr
5383 * check as 'ptr < data + 0' is already false in such case.
5384 */
5385 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5386 if (ptr + 1 + *ptr > data + len)
5387 break;
5388 }
5389
5390 real_len = ptr - data;
5391
5392 /* Adjust for actual length */
5393 if (len != real_len) {
5394 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5395 len = real_len;
5396 }
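
/* Worked example (sketch): for a report claiming len == 10 with data
 *
 *   02 01 06  00 00 00 00 00 00 00
 *
 * the walk above stops at the first zero length byte, real_len becomes 3
 * and the trailing zero padding is dropped before the data is used.
 */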
5397
5398 /* If the direct address is present, then this report is from
5399 * a LE Direct Advertising Report event. In that case it is
5400 * important to see if the address is matching the local
5401 * controller address.
5402 */
5403 if (direct_addr) {
5404 /* Only resolvable random addresses are valid for this
5405 * kind of report; others can be ignored.
5406 */
5407 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5408 return;
5409
5410 /* If the controller is not using resolvable random
5411 * addresses, then this report can be ignored.
5412 */
5413 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5414 return;
5415
5416 /* If the local IRK of the controller does not match
5417 * with the resolvable random address provided, then
5418 * this report can be ignored.
5419 */
5420 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5421 return;
5422 }
5423
5424 /* Check if we need to convert to identity address */
5425 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5426 if (irk) {
5427 bdaddr = &irk->bdaddr;
5428 bdaddr_type = irk->addr_type;
5429 }
5430
5431 /* Check if we have been requested to connect to this device.
5432 *
5433 * direct_addr is set only for directed advertising reports (it is NULL
5434 * for advertising reports) and is already verified to be RPA above.
5435 */
5436 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5437 direct_addr);
5438 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5439 /* Store report for later inclusion by
5440 * mgmt_device_connected
5441 */
5442 memcpy(conn->le_adv_data, data, len);
5443 conn->le_adv_data_len = len;
5444 }
5445
5446 /* Passive scanning shouldn't trigger any device found events,
5447 * except for devices marked as CONN_REPORT for which we do send
5448 * device found events.
5449 */
5450 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5451 if (type == LE_ADV_DIRECT_IND)
5452 return;
5453
5454 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5455 bdaddr, bdaddr_type))
5456 return;
5457
5458 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5459 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5460 else
5461 flags = 0;
5462 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5463 rssi, flags, data, len, NULL, 0);
5464 return;
5465 }
5466
5467 /* When receiving non-connectable or scannable undirected
5468 * advertising reports, the remote device is not
5469 * connectable, so this is indicated clearly in the
5470 * device found event.
5471 *
5472 * When receiving a scan response, then there is no way to
5473 * know if the remote device is connectable or not. However
5474 * since scan responses are merged with a previously seen
5475 * advertising report, the flags field from that report
5476 * will be used.
5477 *
5478 * In the really unlikely case that a controller gets confused
5479 * and just sends a scan response event, then it is marked as
5480 * not connectable as well.
5481 */
5482 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5483 type == LE_ADV_SCAN_RSP)
5484 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5485 else
5486 flags = 0;
5487
5488 /* If there's nothing pending either store the data from this
5489 * event or send an immediate device found event if the data
5490 * should not be stored for later.
5491 */
5492 if (!ext_adv && !has_pending_adv_report(hdev)) {
5493 /* If the report will trigger a SCAN_REQ store it for
5494 * later merging.
5495 */
5496 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5497 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5498 rssi, flags, data, len);
5499 return;
5500 }
5501
5502 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5503 rssi, flags, data, len, NULL, 0);
5504 return;
5505 }
5506
5507 /* Check if the pending report is for the same device as the new one */
5508 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5509 bdaddr_type == d->last_adv_addr_type);
5510
5511 /* If the pending data doesn't match this report or this isn't a
5512 * scan response (e.g. we got a duplicate ADV_IND) then force
5513 * sending of the pending data.
5514 */
5515 if (type != LE_ADV_SCAN_RSP || !match) {
5516 /* Send out whatever is in the cache, but skip duplicates */
5517 if (!match)
5518 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5519 d->last_adv_addr_type, NULL,
5520 d->last_adv_rssi, d->last_adv_flags,
5521 d->last_adv_data,
5522 d->last_adv_data_len, NULL, 0);
5523
5524 /* If the new report will trigger a SCAN_REQ store it for
5525 * later merging.
5526 */
5527 if (!ext_adv && (type == LE_ADV_IND ||
5528 type == LE_ADV_SCAN_IND)) {
5529 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5530 rssi, flags, data, len);
5531 return;
5532 }
5533
5534 /* The advertising reports cannot be merged, so clear
5535 * the pending report and send out a device found event.
5536 */
5537 clear_pending_adv_report(hdev);
5538 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5539 rssi, flags, data, len, NULL, 0);
5540 return;
5541 }
5542
5543 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5544 * the new event is a SCAN_RSP. We can therefore proceed with
5545 * sending a merged device found event.
5546 */
5547 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5548 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5549 d->last_adv_data, d->last_adv_data_len, data, len);
5550 clear_pending_adv_report(hdev);
5551 }
5552
5553 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5554 {
5555 u8 num_reports = skb->data[0];
5556 void *ptr = &skb->data[1];
5557
5558 hci_dev_lock(hdev);
5559
5560 while (num_reports--) {
5561 struct hci_ev_le_advertising_info *ev = ptr;
5562 s8 rssi;
5563
5564 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5565 bt_dev_err(hdev, "Malicious advertising data.");
5566 break;
5567 }
5568
5569 if (ev->length <= HCI_MAX_AD_LENGTH &&
5570 ev->data + ev->length <= skb_tail_pointer(skb)) {
5571 rssi = ev->data[ev->length];
5572 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5573 ev->bdaddr_type, NULL, 0, rssi,
5574 ev->data, ev->length, false);
5575 } else {
5576 bt_dev_err(hdev, "Dropping invalid advertising data");
5577 }
5578
5579 ptr += sizeof(*ev) + ev->length + 1;
5580 }
5581
5582 hci_dev_unlock(hdev);
5583 }
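
/* Wire-format note: in this event each report is followed by a single
 * RSSI byte that is not part of struct hci_ev_le_advertising_info,
 * which is why the RSSI is read at ev->data[ev->length] and the cursor
 * advances by sizeof(*ev) + ev->length + 1.
 */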
5584
5585 static u8 ext_evt_type_to_legacy(u16 evt_type)
5586 {
5587 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5588 switch (evt_type) {
5589 case LE_LEGACY_ADV_IND:
5590 return LE_ADV_IND;
5591 case LE_LEGACY_ADV_DIRECT_IND:
5592 return LE_ADV_DIRECT_IND;
5593 case LE_LEGACY_ADV_SCAN_IND:
5594 return LE_ADV_SCAN_IND;
5595 case LE_LEGACY_NONCONN_IND:
5596 return LE_ADV_NONCONN_IND;
5597 case LE_LEGACY_SCAN_RSP_ADV:
5598 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5599 return LE_ADV_SCAN_RSP;
5600 }
5601
5602 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5603 evt_type);
5604
5605 return LE_ADV_INVALID;
5606 }
5607
5608 if (evt_type & LE_EXT_ADV_CONN_IND) {
5609 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5610 return LE_ADV_DIRECT_IND;
5611
5612 return LE_ADV_IND;
5613 }
5614
5615 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5616 return LE_ADV_SCAN_RSP;
5617
5618 if (evt_type & LE_EXT_ADV_SCAN_IND)
5619 return LE_ADV_SCAN_IND;
5620
5621 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5622 evt_type & LE_EXT_ADV_DIRECT_IND)
5623 return LE_ADV_NONCONN_IND;
5624
5625 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5626 evt_type);
5627
5628 return LE_ADV_INVALID;
5629 }
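
/* Example (sketch, using the bit masks referenced above): a legacy
 * ADV_IND is reported with evt_type 0x0013 (the legacy PDU bit plus the
 * connectable and scannable properties) and maps directly back to
 * LE_ADV_IND, whereas a non-legacy report only exposes individual
 * property bits, hence the if-cascade for the extended case.
 */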
5630
5631 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5632 {
5633 u8 num_reports = skb->data[0];
5634 void *ptr = &skb->data[1];
5635
5636 hci_dev_lock(hdev);
5637
5638 while (num_reports--) {
5639 struct hci_ev_le_ext_adv_report *ev = ptr;
5640 u8 legacy_evt_type;
5641 u16 evt_type;
5642
5643 evt_type = __le16_to_cpu(ev->evt_type);
5644 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5645 if (legacy_evt_type != LE_ADV_INVALID) {
5646 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5647 ev->bdaddr_type, NULL, 0, ev->rssi,
5648 ev->data, ev->length,
5649 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5650 }
5651
5652 ptr += sizeof(*ev) + ev->length;
5653 }
5654
5655 hci_dev_unlock(hdev);
5656 }
5657
5658 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5659 struct sk_buff *skb)
5660 {
5661 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5662 struct hci_conn *conn;
5663
5664 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5665
5666 hci_dev_lock(hdev);
5667
5668 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5669 if (conn) {
5670 if (!ev->status)
5671 memcpy(conn->features[0], ev->features, 8);
5672
5673 if (conn->state == BT_CONFIG) {
5674 __u8 status;
5675
5676 /* If the local controller supports slave-initiated
5677 * features exchange, but the remote controller does
5678 * not, then it is possible that the error code 0x1a
5679 * for unsupported remote feature gets returned.
5680 *
5681 * In this specific case, allow the connection to
5682 * transition into connected state and mark it as
5683 * successful.
5684 */
5685 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5686 !conn->out && ev->status == 0x1a)
5687 status = 0x00;
5688 else
5689 status = ev->status;
5690
5691 conn->state = BT_CONNECTED;
5692 hci_connect_cfm(conn, status);
5693 hci_conn_drop(conn);
5694 }
5695 }
5696
5697 hci_dev_unlock(hdev);
5698 }
5699
5700 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5701 {
5702 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5703 struct hci_cp_le_ltk_reply cp;
5704 struct hci_cp_le_ltk_neg_reply neg;
5705 struct hci_conn *conn;
5706 struct smp_ltk *ltk;
5707
5708 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5709
5710 hci_dev_lock(hdev);
5711
5712 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5713 if (conn == NULL)
5714 goto not_found;
5715
5716 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5717 if (!ltk)
5718 goto not_found;
5719
5720 if (smp_ltk_is_sc(ltk)) {
5721 /* With SC both EDiv and Rand are set to zero */
5722 if (ev->ediv || ev->rand)
5723 goto not_found;
5724 } else {
5725 /* For non-SC keys check that EDiv and Rand match */
5726 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5727 goto not_found;
5728 }
5729
5730 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5731 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5732 cp.handle = cpu_to_le16(conn->handle);
5733
5734 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5735
5736 conn->enc_key_size = ltk->enc_size;
5737
5738 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5739
5740 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5741 * temporary key used to encrypt a connection following
5742 * pairing. It is used during the Encrypted Session Setup to
5743 * distribute the keys. Later, security can be re-established
5744 * using a distributed LTK.
5745 */
5746 if (ltk->type == SMP_STK) {
5747 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5748 list_del_rcu(<k->list);
5749 kfree_rcu(ltk, rcu);
5750 } else {
5751 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5752 }
5753
5754 hci_dev_unlock(hdev);
5755
5756 return;
5757
5758 not_found:
5759 neg.handle = ev->handle;
5760 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5761 hci_dev_unlock(hdev);
5762 }
5763
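/* Reject a remote connection parameter request with the given HCI
 * error code.
 */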
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

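/* Handle the LE Remote Connection Parameter Request event: validate the
 * requested interval/latency/timeout, remember them when acting as
 * master (telling the management interface whether the parameters were
 * stored for a known device), and then accept or reject the request.
 */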
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

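/* Handle the LE Direct Advertising Report event. Unlike the regular
 * advertising report, each entry here is fixed-size and carries the
 * address the advertiser directed the packet at, so the payload can be
 * bounds-checked up front and walked as an array.
 */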
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}

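/* All LE events arrive wrapped in an LE Meta event; strip the meta
 * header and dispatch on the subevent code.
 */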
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

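/* Check whether the cloned skb holds the event that completes the
 * pending request. When the request waits for a specific event, match
 * on that event code; otherwise require a Command Complete event whose
 * opcode matches the sent command, and leave the skb positioned at the
 * command's return parameters.
 */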
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

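/* Main entry point for HCI event processing: called for every event
 * skb received from the controller. Dispatches to the per-event
 * handlers above and, when the event completes a pending command or
 * request, invokes the stored completion callback.
 */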
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;

		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}