/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
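
/* Note (illustrative): secs_to_jiffies() just scales a seconds value into
 * jiffies via msecs_to_jiffies(). A typical use, as seen further down in
 * this file when arming the RPA expiry work, is:
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
 *			   secs_to_jiffies(hdev->rpa_timeout));
 */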

/* Handle HCI Event packets */

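/* A reader's sketch of the hci_cc_*() convention used below (not normative
 * documentation): each handler runs on a Command Complete event, casts
 * skb->data to the matching hci_rp_* return-parameter struct (or reads a
 * bare status byte), bails out on a non-zero status and only then updates
 * hdev state, taking hci_dev_lock() where connection or mgmt state is
 * touched:
 *
 *	struct hci_rp_read_local_version *rp = (void *) skb->data;
 *
 *	if (rp->status)
 *		return;
 *	... update hdev fields from rp ...
 */
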
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have a status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

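/* Illustrative sketch of the other side of the clear_bit()/wake_up_bit()
 * pairing above (the actual waiter lives elsewhere in the stack): a sleeper
 * can block until HCI_INQUIRY is cleared with
 *
 *	wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE);
 *
 * The smp_mb__after_atomic() barrier makes sure the cleared bit is visible
 * before any task woken by wake_up_bit() re-checks it.
 */
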
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

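/* Why "sent + 2" above (explanatory sketch based on the command layout in
 * include/net/bluetooth/hci.h): hci_sent_cmd_data() returns a pointer to
 * the parameters of the command we sent, which for Write Link Policy are
 *
 *	struct hci_cp_write_link_policy {
 *		__le16 handle;		(offset 0)
 *		__le16 policy;		(offset 2)
 *	} __packed;
 *
 * so get_unaligned_le16(sent + 2) re-reads the requested policy, since the
 * Command Complete return parameters only echo the status and handle.
 */
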
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_set_event_filter *cp;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

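/* A reader's note on the feature tests above: the LMP feature mask is kept
 * as pages of eight bytes in hdev->features[page][byte], and macros such as
 * LMP_3SLOT or LMP_EDR_ESCO_2M are single-bit masks within one byte of
 * page 0, e.g.:
 *
 *	if (hdev->features[0][0] & LMP_3SLOT)
 *		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
 */
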
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

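/* Note on the counters initialised above (a simplified sketch of the flow
 * control handled elsewhere in the core): acl_cnt/sco_cnt track how many
 * packets may still be in flight to the controller. Conceptually:
 *
 *	hdev->acl_cnt--;		on each ACL frame sent
 *	hdev->acl_cnt += completed;	on Number of Completed Packets
 *
 * so after Read Buffer Size both counters start at the full acl_pkts and
 * sco_pkts budget reported by the controller.
 */
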
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

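/* Flow-control note for the LE values above (hedged sketch): le_cnt plays
 * the same role as acl_cnt but for LE data. A controller reporting
 * le_max_pkt == 0 has no dedicated LE buffers, and the core then falls
 * back to the BR/EDR ACL pool, conceptually:
 *
 *	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
 */
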
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only the matching adv instance; adv handle 0x00 is expected
	 * to use HCI_OP_LE_SET_RANDOM_ADDR instead, since that command works
	 * for both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}

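/* Layout note for the Set Extended Advertising Enable parsing above, based
 * on the definitions in include/net/bluetooth/hci.h: cp->data holds an
 * array of cp->num_of_sets per-set entries, each shaped like
 *
 *	struct hci_cp_ext_adv_set {
 *		__u8   handle;
 *		__le16 duration;
 *		__u8   max_ext_adv_evts;
 *	} __packed;
 *
 * The handler above only inspects the first entry, which covers the
 * single-set requests issued by this file.
 */
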
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

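/* Why reports are buffered at all (a reader's sketch): during LE active
 * scanning an advertising report is typically followed by a scan response
 * from the same address, and user space should see the pair as a single
 * device-found event. The first report is therefore stashed:
 *
 *	store_pending_adv_report(hdev, bdaddr, type, rssi, flags, data, len);
 *
 * and later merged with the scan response, or flushed on its own when
 * scanning stops (see le_set_scan_enable_complete() below).
 */
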
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request, so in
		 * that case mark discovery as stopped. If this was not
		 * because of a connect request, advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}

static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

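/* From here on, the hci_cs_*() handlers deal with Command Status events
 * rather than Command Complete (a reader's note): a Command Status carries
 * only a status byte and no return parameters, so these handlers receive a
 * plain __u8 status and must look up the request they answer via
 * hci_sent_cmd_data() for any context, e.g.:
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
 *	if (!cp)
 *		return;
 */
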
hci_cs_inquiry(struct hci_dev * hdev,__u8 status)1920 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1921 {
1922 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1923
1924 if (status) {
1925 hci_conn_check_pending(hdev);
1926 return;
1927 }
1928
1929 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
1930 set_bit(HCI_INQUIRY, &hdev->flags);
1931 }
1932
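/* Command Status for HCI_Create_Connection. On failure the connection
 * object is torn down, except for status 0x0c (Command Disallowed) on
 * the first couple of attempts, where the connection is parked in
 * BT_CONNECT2 so it can be retried. On success, an outgoing ACL conn
 * object is created if one does not exist yet.
 */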
1933 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1934 {
1935 struct hci_cp_create_conn *cp;
1936 struct hci_conn *conn;
1937
1938 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1939
1940 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1941 if (!cp)
1942 return;
1943
1944 hci_dev_lock(hdev);
1945
1946 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1947
1948 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1949
1950 if (status) {
1951 if (conn && conn->state == BT_CONNECT) {
1952 if (status != 0x0c || conn->attempt > 2) {
1953 conn->state = BT_CLOSED;
1954 hci_connect_cfm(conn, status);
1955 hci_conn_del(conn);
1956 } else
1957 conn->state = BT_CONNECT2;
1958 }
1959 } else {
1960 if (!conn) {
1961 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1962 HCI_ROLE_MASTER);
1963 if (!conn)
1964 bt_dev_err(hdev, "no memory for new connection");
1965 }
1966 }
1967
1968 hci_dev_unlock(hdev);
1969 }
1970
1971 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1972 {
1973 struct hci_cp_add_sco *cp;
1974 struct hci_conn *acl, *sco;
1975 __u16 handle;
1976
1977 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1978
1979 if (!status)
1980 return;
1981
1982 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1983 if (!cp)
1984 return;
1985
1986 handle = __le16_to_cpu(cp->handle);
1987
1988 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1989
1990 hci_dev_lock(hdev);
1991
1992 acl = hci_conn_hash_lookup_handle(hdev, handle);
1993 if (acl) {
1994 sco = acl->link;
1995 if (sco) {
1996 sco->state = BT_CLOSED;
1997
1998 hci_connect_cfm(sco, status);
1999 hci_conn_del(sco);
2000 }
2001 }
2002
2003 hci_dev_unlock(hdev);
2004 }
2005
2006 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2007 {
2008 struct hci_cp_auth_requested *cp;
2009 struct hci_conn *conn;
2010
2011 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2012
2013 if (!status)
2014 return;
2015
2016 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2017 if (!cp)
2018 return;
2019
2020 hci_dev_lock(hdev);
2021
2022 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2023 if (conn) {
2024 if (conn->state == BT_CONFIG) {
2025 hci_connect_cfm(conn, status);
2026 hci_conn_drop(conn);
2027 }
2028 }
2029
2030 hci_dev_unlock(hdev);
2031 }
2032
2033 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2034 {
2035 struct hci_cp_set_conn_encrypt *cp;
2036 struct hci_conn *conn;
2037
2038 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2039
2040 if (!status)
2041 return;
2042
2043 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2044 if (!cp)
2045 return;
2046
2047 hci_dev_lock(hdev);
2048
2049 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2050 if (conn) {
2051 if (conn->state == BT_CONFIG) {
2052 hci_connect_cfm(conn, status);
2053 hci_conn_drop(conn);
2054 }
2055 }
2056
2057 hci_dev_unlock(hdev);
2058 }
2059
2060 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2061 struct hci_conn *conn)
2062 {
2063 if (conn->state != BT_CONFIG || !conn->out)
2064 return 0;
2065
2066 if (conn->pending_sec_level == BT_SECURITY_SDP)
2067 return 0;
2068
2069 /* Only request authentication for SSP connections or non-SSP
2070 * devices with sec_level MEDIUM or HIGH or if MITM protection
2071 * is requested.
2072 */
2073 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2074 conn->pending_sec_level != BT_SECURITY_FIPS &&
2075 conn->pending_sec_level != BT_SECURITY_HIGH &&
2076 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2077 return 0;
2078
2079 return 1;
2080 }
2081
2082 static int hci_resolve_name(struct hci_dev *hdev,
2083 struct inquiry_entry *e)
2084 {
2085 struct hci_cp_remote_name_req cp;
2086
2087 memset(&cp, 0, sizeof(cp));
2088
2089 bacpy(&cp.bdaddr, &e->data.bdaddr);
2090 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2091 cp.pscan_mode = e->data.pscan_mode;
2092 cp.clock_offset = e->data.clock_offset;
2093
2094 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2095 }
2096
2097 static bool hci_resolve_next_name(struct hci_dev *hdev)
2098 {
2099 struct discovery_state *discov = &hdev->discovery;
2100 struct inquiry_entry *e;
2101
2102 if (list_empty(&discov->resolve))
2103 return false;
2104
2105 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2106 if (!e)
2107 return false;
2108
2109 if (hci_resolve_name(hdev, e) == 0) {
2110 e->name_state = NAME_PENDING;
2111 return true;
2112 }
2113
2114 return false;
2115 }
2116
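/* Called on Remote Name Request Complete (and on a failed name request
 * submission) to advance the mgmt discovery state machine: report the
 * resolved name, kick off resolution of the next pending entry, and
 * mark discovery stopped once the resolve list runs dry.
 */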
2117 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2118 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2119 {
2120 struct discovery_state *discov = &hdev->discovery;
2121 struct inquiry_entry *e;
2122
2123 	/* Update the mgmt connected state if necessary. Be careful,
2124 	 * however, with conn objects that exist but are not (yet)
2125 	 * connected: only those in the BT_CONFIG or BT_CONNECTED
2126 	 * states can be considered connected.
2127 	 */
2128 if (conn &&
2129 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2130 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2131 mgmt_device_connected(hdev, conn, name, name_len);
2132
2133 if (discov->state == DISCOVERY_STOPPED)
2134 return;
2135
2136 if (discov->state == DISCOVERY_STOPPING)
2137 goto discov_complete;
2138
2139 if (discov->state != DISCOVERY_RESOLVING)
2140 return;
2141
2142 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2143 	/* If the device was not found in the list of devices whose names
2144 	 * are pending, there is no need to continue resolving the next
2145 	 * name, as that will be done upon receiving another Remote Name
2146 	 * Request Complete event. */
2147 if (!e)
2148 return;
2149
2150 list_del(&e->list);
2151 if (name) {
2152 e->name_state = NAME_KNOWN;
2153 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2154 e->data.rssi, name, name_len);
2155 } else {
2156 e->name_state = NAME_NOT_KNOWN;
2157 }
2158
2159 if (hci_resolve_next_name(hdev))
2160 return;
2161
2162 discov_complete:
2163 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2164 }
2165
2166 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2167 {
2168 struct hci_cp_remote_name_req *cp;
2169 struct hci_conn *conn;
2170
2171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2172
2173 	/* If successful, wait for the name req complete event before
2174 	 * checking for the need to do authentication. */
2175 if (!status)
2176 return;
2177
2178 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2179 if (!cp)
2180 return;
2181
2182 hci_dev_lock(hdev);
2183
2184 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2185
2186 if (hci_dev_test_flag(hdev, HCI_MGMT))
2187 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2188
2189 if (!conn)
2190 goto unlock;
2191
2192 if (!hci_outgoing_auth_needed(hdev, conn))
2193 goto unlock;
2194
2195 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2196 struct hci_cp_auth_requested auth_cp;
2197
2198 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2199
2200 auth_cp.handle = __cpu_to_le16(conn->handle);
2201 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2202 sizeof(auth_cp), &auth_cp);
2203 }
2204
2205 unlock:
2206 hci_dev_unlock(hdev);
2207 }
2208
2209 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2210 {
2211 struct hci_cp_read_remote_features *cp;
2212 struct hci_conn *conn;
2213
2214 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2215
2216 if (!status)
2217 return;
2218
2219 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2220 if (!cp)
2221 return;
2222
2223 hci_dev_lock(hdev);
2224
2225 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2226 if (conn) {
2227 if (conn->state == BT_CONFIG) {
2228 hci_connect_cfm(conn, status);
2229 hci_conn_drop(conn);
2230 }
2231 }
2232
2233 hci_dev_unlock(hdev);
2234 }
2235
2236 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2237 {
2238 struct hci_cp_read_remote_ext_features *cp;
2239 struct hci_conn *conn;
2240
2241 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2242
2243 if (!status)
2244 return;
2245
2246 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2247 if (!cp)
2248 return;
2249
2250 hci_dev_lock(hdev);
2251
2252 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2253 if (conn) {
2254 if (conn->state == BT_CONFIG) {
2255 hci_connect_cfm(conn, status);
2256 hci_conn_drop(conn);
2257 }
2258 }
2259
2260 hci_dev_unlock(hdev);
2261 }
2262
2263 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2264 {
2265 struct hci_cp_setup_sync_conn *cp;
2266 struct hci_conn *acl, *sco;
2267 __u16 handle;
2268
2269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2270
2271 if (!status)
2272 return;
2273
2274 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2275 if (!cp)
2276 return;
2277
2278 handle = __le16_to_cpu(cp->handle);
2279
2280 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2281
2282 hci_dev_lock(hdev);
2283
2284 acl = hci_conn_hash_lookup_handle(hdev, handle);
2285 if (acl) {
2286 sco = acl->link;
2287 if (sco) {
2288 sco->state = BT_CLOSED;
2289
2290 hci_connect_cfm(sco, status);
2291 hci_conn_del(sco);
2292 }
2293 }
2294
2295 hci_dev_unlock(hdev);
2296 }
2297
2298 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2299 {
2300 struct hci_cp_sniff_mode *cp;
2301 struct hci_conn *conn;
2302
2303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2304
2305 if (!status)
2306 return;
2307
2308 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2309 if (!cp)
2310 return;
2311
2312 hci_dev_lock(hdev);
2313
2314 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2315 if (conn) {
2316 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2317
2318 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2319 hci_sco_setup(conn, status);
2320 }
2321
2322 hci_dev_unlock(hdev);
2323 }
2324
2325 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2326 {
2327 struct hci_cp_exit_sniff_mode *cp;
2328 struct hci_conn *conn;
2329
2330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2331
2332 if (!status)
2333 return;
2334
2335 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2336 if (!cp)
2337 return;
2338
2339 hci_dev_lock(hdev);
2340
2341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2342 if (conn) {
2343 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2344
2345 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2346 hci_sco_setup(conn, status);
2347 }
2348
2349 hci_dev_unlock(hdev);
2350 }
2351
2352 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2353 {
2354 struct hci_cp_disconnect *cp;
2355 struct hci_conn *conn;
2356
2357 if (!status)
2358 return;
2359
2360 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2361 if (!cp)
2362 return;
2363
2364 hci_dev_lock(hdev);
2365
2366 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2367 if (conn) {
2368 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2369 conn->dst_type, status);
2370
2371 if (conn->type == LE_LINK) {
2372 hdev->cur_adv_instance = conn->adv_instance;
2373 hci_req_reenable_advertising(hdev);
2374 }
2375
2376 		/* If the disconnection failed for any reason, the upper layer
2377 		 * does not retry the disconnection in the current implementation.
2378 		 * Hence, we need to do some basic cleanup here and re-enable
2379 		 * advertising if necessary.
2380 		 */
2381 hci_conn_del(conn);
2382 }
2383
2384 hci_dev_unlock(hdev);
2385 }
2386
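/* Common Command Status handling for the legacy and extended LE create
 * connection commands; both callers pass the address information from
 * their respective command copies.
 */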
2387 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2388 u8 peer_addr_type, u8 own_address_type,
2389 u8 filter_policy)
2390 {
2391 struct hci_conn *conn;
2392
2393 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2394 peer_addr_type);
2395 if (!conn)
2396 return;
2397
2398 	/* When using controller-based address resolution, the new
2399 	 * address types 0x02 and 0x03 are used. These types need to be
2400 	 * converted back into either public or random address type.
2401 	 */
2402 if (use_ll_privacy(hdev) &&
2403 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2404 switch (own_address_type) {
2405 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2406 own_address_type = ADDR_LE_DEV_PUBLIC;
2407 break;
2408 case ADDR_LE_DEV_RANDOM_RESOLVED:
2409 own_address_type = ADDR_LE_DEV_RANDOM;
2410 break;
2411 }
2412 }
2413
2414 /* Store the initiator and responder address information which
2415 * is needed for SMP. These values will not change during the
2416 * lifetime of the connection.
2417 */
2418 conn->init_addr_type = own_address_type;
2419 if (own_address_type == ADDR_LE_DEV_RANDOM)
2420 bacpy(&conn->init_addr, &hdev->random_addr);
2421 else
2422 bacpy(&conn->init_addr, &hdev->bdaddr);
2423
2424 conn->resp_addr_type = peer_addr_type;
2425 bacpy(&conn->resp_addr, peer_addr);
2426
2427 /* We don't want the connection attempt to stick around
2428 * indefinitely since LE doesn't have a page timeout concept
2429 * like BR/EDR. Set a timer for any connection that doesn't use
2430 * the accept list for connecting.
2431 */
2432 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2433 queue_delayed_work(conn->hdev->workqueue,
2434 &conn->le_conn_timeout,
2435 conn->conn_timeout);
2436 }
2437
2438 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2439 {
2440 struct hci_cp_le_create_conn *cp;
2441
2442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2443
2444 /* All connection failure handling is taken care of by the
2445 * hci_le_conn_failed function which is triggered by the HCI
2446 * request completion callbacks used for connecting.
2447 */
2448 if (status)
2449 return;
2450
2451 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2452 if (!cp)
2453 return;
2454
2455 hci_dev_lock(hdev);
2456
2457 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2458 cp->own_address_type, cp->filter_policy);
2459
2460 hci_dev_unlock(hdev);
2461 }
2462
2463 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2464 {
2465 struct hci_cp_le_ext_create_conn *cp;
2466
2467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2468
2469 /* All connection failure handling is taken care of by the
2470 * hci_le_conn_failed function which is triggered by the HCI
2471 * request completion callbacks used for connecting.
2472 */
2473 if (status)
2474 return;
2475
2476 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2477 if (!cp)
2478 return;
2479
2480 hci_dev_lock(hdev);
2481
2482 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2483 cp->own_addr_type, cp->filter_policy);
2484
2485 hci_dev_unlock(hdev);
2486 }
2487
2488 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2489 {
2490 struct hci_cp_le_read_remote_features *cp;
2491 struct hci_conn *conn;
2492
2493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2494
2495 if (!status)
2496 return;
2497
2498 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2499 if (!cp)
2500 return;
2501
2502 hci_dev_lock(hdev);
2503
2504 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2505 if (conn) {
2506 if (conn->state == BT_CONFIG) {
2507 hci_connect_cfm(conn, status);
2508 hci_conn_drop(conn);
2509 }
2510 }
2511
2512 hci_dev_unlock(hdev);
2513 }
2514
2515 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2516 {
2517 struct hci_cp_le_start_enc *cp;
2518 struct hci_conn *conn;
2519
2520 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2521
2522 if (!status)
2523 return;
2524
2525 hci_dev_lock(hdev);
2526
2527 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2528 if (!cp)
2529 goto unlock;
2530
2531 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2532 if (!conn)
2533 goto unlock;
2534
2535 if (conn->state != BT_CONNECTED)
2536 goto unlock;
2537
2538 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2539 hci_conn_drop(conn);
2540
2541 unlock:
2542 hci_dev_unlock(hdev);
2543 }
2544
2545 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2546 {
2547 struct hci_cp_switch_role *cp;
2548 struct hci_conn *conn;
2549
2550 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2551
2552 if (!status)
2553 return;
2554
2555 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2556 if (!cp)
2557 return;
2558
2559 hci_dev_lock(hdev);
2560
2561 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2562 if (conn)
2563 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2564
2565 hci_dev_unlock(hdev);
2566 }
2567
2568 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2569 {
2570 __u8 status = *((__u8 *) skb->data);
2571 struct discovery_state *discov = &hdev->discovery;
2572 struct inquiry_entry *e;
2573
2574 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2575
2576 hci_conn_check_pending(hdev);
2577
2578 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2579 return;
2580
2581 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2582 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2583
2584 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2585 return;
2586
2587 hci_dev_lock(hdev);
2588
2589 if (discov->state != DISCOVERY_FINDING)
2590 goto unlock;
2591
2592 if (list_empty(&discov->resolve)) {
2593 /* When BR/EDR inquiry is active and no LE scanning is in
2594 * progress, then change discovery state to indicate completion.
2595 *
2596 * When running LE scanning and BR/EDR inquiry simultaneously
2597 * and the LE scan already finished, then change the discovery
2598 * state to indicate completion.
2599 */
2600 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2601 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2602 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2603 goto unlock;
2604 }
2605
2606 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2607 if (e && hci_resolve_name(hdev, e) == 0) {
2608 e->name_state = NAME_PENDING;
2609 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2610 } else {
2611 /* When BR/EDR inquiry is active and no LE scanning is in
2612 * progress, then change discovery state to indicate completion.
2613 *
2614 * When running LE scanning and BR/EDR inquiry simultaneously
2615 * and the LE scan already finished, then change the discovery
2616 * state to indicate completion.
2617 */
2618 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2619 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2620 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2621 }
2622
2623 unlock:
2624 hci_dev_unlock(hdev);
2625 }
2626
2627 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 {
2629 struct inquiry_data data;
2630 struct inquiry_info *info = (void *) (skb->data + 1);
2631 int num_rsp = *((__u8 *) skb->data);
2632
2633 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2634
2635 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2636 return;
2637
2638 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2639 return;
2640
2641 hci_dev_lock(hdev);
2642
2643 for (; num_rsp; num_rsp--, info++) {
2644 u32 flags;
2645
2646 bacpy(&data.bdaddr, &info->bdaddr);
2647 data.pscan_rep_mode = info->pscan_rep_mode;
2648 data.pscan_period_mode = info->pscan_period_mode;
2649 data.pscan_mode = info->pscan_mode;
2650 memcpy(data.dev_class, info->dev_class, 3);
2651 data.clock_offset = info->clock_offset;
2652 data.rssi = HCI_RSSI_INVALID;
2653 data.ssp_mode = 0x00;
2654
2655 flags = hci_inquiry_cache_update(hdev, &data, false);
2656
2657 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2658 info->dev_class, HCI_RSSI_INVALID,
2659 flags, NULL, 0, NULL, 0);
2660 }
2661
2662 hci_dev_unlock(hdev);
2663 }
2664
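/* Handles the BR/EDR Connection Complete event for both outgoing and
 * incoming connections. For ACL links the connection enters BT_CONFIG
 * and the remote features are read before it is reported as connected;
 * SCO/eSCO links go straight to BT_CONNECTED.
 */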
2665 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2666 {
2667 struct hci_ev_conn_complete *ev = (void *) skb->data;
2668 struct hci_conn *conn;
2669
2670 BT_DBG("%s", hdev->name);
2671
2672 hci_dev_lock(hdev);
2673
2674 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2675 if (!conn) {
2676 		/* Connection may not exist if auto-connected. Check the BR/EDR
2677 		 * accept list to see if this device is allowed to auto-connect.
2678 		 * If the link is an ACL type, create the connection object
2679 		 * automatically.
2680 		 *
2681 		 * Auto-connect will only occur if the event filter is
2682 		 * programmed with a given address. Right now, the event filter
2683 		 * is only used during suspend.
2684 		 */
2685 if (ev->link_type == ACL_LINK &&
2686 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2687 &ev->bdaddr,
2688 BDADDR_BREDR)) {
2689 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2690 HCI_ROLE_SLAVE);
2691 if (!conn) {
2692 bt_dev_err(hdev, "no memory for new conn");
2693 goto unlock;
2694 }
2695 } else {
2696 if (ev->link_type != SCO_LINK)
2697 goto unlock;
2698
2699 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2700 &ev->bdaddr);
2701 if (!conn)
2702 goto unlock;
2703
2704 conn->type = SCO_LINK;
2705 }
2706 }
2707
2708 if (!ev->status) {
2709 conn->handle = __le16_to_cpu(ev->handle);
2710
2711 if (conn->type == ACL_LINK) {
2712 conn->state = BT_CONFIG;
2713 hci_conn_hold(conn);
2714
2715 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2716 !hci_find_link_key(hdev, &ev->bdaddr))
2717 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2718 else
2719 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2720 } else
2721 conn->state = BT_CONNECTED;
2722
2723 hci_debugfs_create_conn(conn);
2724 hci_conn_add_sysfs(conn);
2725
2726 if (test_bit(HCI_AUTH, &hdev->flags))
2727 set_bit(HCI_CONN_AUTH, &conn->flags);
2728
2729 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2730 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2731
2732 /* Get remote features */
2733 if (conn->type == ACL_LINK) {
2734 struct hci_cp_read_remote_features cp;
2735 cp.handle = ev->handle;
2736 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2737 sizeof(cp), &cp);
2738
2739 hci_req_update_scan(hdev);
2740 }
2741
2742 /* Set packet type for incoming connection */
2743 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2744 struct hci_cp_change_conn_ptype cp;
2745 cp.handle = ev->handle;
2746 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2747 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2748 &cp);
2749 }
2750 } else {
2751 conn->state = BT_CLOSED;
2752 if (conn->type == ACL_LINK)
2753 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2754 conn->dst_type, ev->status);
2755 }
2756
2757 if (conn->type == ACL_LINK)
2758 hci_sco_setup(conn, ev->status);
2759
2760 if (ev->status) {
2761 hci_connect_cfm(conn, ev->status);
2762 hci_conn_del(conn);
2763 } else if (ev->link_type == SCO_LINK) {
2764 switch (conn->setting & SCO_AIRMODE_MASK) {
2765 case SCO_AIRMODE_CVSD:
2766 if (hdev->notify)
2767 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2768 break;
2769 }
2770
2771 hci_connect_cfm(conn, ev->status);
2772 }
2773
2774 unlock:
2775 hci_dev_unlock(hdev);
2776
2777 hci_conn_check_pending(hdev);
2778 }
2779
2780 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2781 {
2782 struct hci_cp_reject_conn_req cp;
2783
2784 bacpy(&cp.bdaddr, bdaddr);
2785 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2786 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2787 }
2788
2789 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2790 {
2791 struct hci_ev_conn_request *ev = (void *) skb->data;
2792 int mask = hdev->link_mode;
2793 struct inquiry_entry *ie;
2794 struct hci_conn *conn;
2795 __u8 flags = 0;
2796
2797 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2798 ev->link_type);
2799
2800 	/* Reject an incoming connection from a device with the same BD_ADDR
2801 	 * to guard against CVE-2020-26555.
2802 	 */
2803 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2804 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2805 &ev->bdaddr);
2806 hci_reject_conn(hdev, &ev->bdaddr);
2807 return;
2808 }
2809
2810 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2811 &flags);
2812
2813 if (!(mask & HCI_LM_ACCEPT)) {
2814 hci_reject_conn(hdev, &ev->bdaddr);
2815 return;
2816 }
2817
2818 hci_dev_lock(hdev);
2819
2820 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2821 BDADDR_BREDR)) {
2822 hci_reject_conn(hdev, &ev->bdaddr);
2823 goto unlock;
2824 }
2825
2826 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2827 * connection. These features are only touched through mgmt so
2828 * only do the checks if HCI_MGMT is set.
2829 */
2830 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2831 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2832 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2833 BDADDR_BREDR)) {
2834 hci_reject_conn(hdev, &ev->bdaddr);
2835 goto unlock;
2836 }
2837
2838 /* Connection accepted */
2839
2840 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2841 if (ie)
2842 memcpy(ie->data.dev_class, ev->dev_class, 3);
2843
2844 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2845 &ev->bdaddr);
2846 if (!conn) {
2847 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2848 HCI_ROLE_SLAVE);
2849 if (!conn) {
2850 bt_dev_err(hdev, "no memory for new connection");
2851 goto unlock;
2852 }
2853 }
2854
2855 memcpy(conn->dev_class, ev->dev_class, 3);
2856
2857 hci_dev_unlock(hdev);
2858
2859 if (ev->link_type == ACL_LINK ||
2860 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2861 struct hci_cp_accept_conn_req cp;
2862 conn->state = BT_CONNECT;
2863
2864 bacpy(&cp.bdaddr, &ev->bdaddr);
2865
2866 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2867 cp.role = 0x00; /* Become central */
2868 else
2869 cp.role = 0x01; /* Remain peripheral */
2870
2871 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2872 } else if (!(flags & HCI_PROTO_DEFER)) {
2873 struct hci_cp_accept_sync_conn_req cp;
2874 conn->state = BT_CONNECT;
2875
2876 bacpy(&cp.bdaddr, &ev->bdaddr);
2877 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2878
2879 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2880 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2881 cp.max_latency = cpu_to_le16(0xffff);
2882 cp.content_format = cpu_to_le16(hdev->voice_setting);
2883 cp.retrans_effort = 0xff;
2884
2885 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2886 &cp);
2887 } else {
2888 conn->state = BT_CONNECT2;
2889 hci_connect_cfm(conn, 0);
2890 }
2891
2892 return;
2893 unlock:
2894 hci_dev_unlock(hdev);
2895 }
2896
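/* Translate an HCI disconnect reason into the coarser set of reasons
 * exposed over the mgmt interface.
 */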
2897 static u8 hci_to_mgmt_reason(u8 err)
2898 {
2899 switch (err) {
2900 case HCI_ERROR_CONNECTION_TIMEOUT:
2901 return MGMT_DEV_DISCONN_TIMEOUT;
2902 case HCI_ERROR_REMOTE_USER_TERM:
2903 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2904 case HCI_ERROR_REMOTE_POWER_OFF:
2905 return MGMT_DEV_DISCONN_REMOTE;
2906 case HCI_ERROR_LOCAL_HOST_TERM:
2907 return MGMT_DEV_DISCONN_LOCAL_HOST;
2908 default:
2909 return MGMT_DEV_DISCONN_UNKNOWN;
2910 }
2911 }
2912
2913 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2914 {
2915 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2916 u8 reason;
2917 struct hci_conn_params *params;
2918 struct hci_conn *conn;
2919 bool mgmt_connected;
2920
2921 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2922
2923 hci_dev_lock(hdev);
2924
2925 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2926 if (!conn)
2927 goto unlock;
2928
2929 if (ev->status) {
2930 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2931 conn->dst_type, ev->status);
2932 goto unlock;
2933 }
2934
2935 conn->state = BT_CLOSED;
2936
2937 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2938
2939 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2940 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2941 else
2942 reason = hci_to_mgmt_reason(ev->reason);
2943
2944 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2945 reason, mgmt_connected);
2946
2947 if (conn->type == ACL_LINK) {
2948 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2949 hci_remove_link_key(hdev, &conn->dst);
2950
2951 hci_req_update_scan(hdev);
2952 }
2953
2954 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2955 if (params) {
2956 switch (params->auto_connect) {
2957 case HCI_AUTO_CONN_LINK_LOSS:
2958 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2959 break;
2960 fallthrough;
2961
2962 case HCI_AUTO_CONN_DIRECT:
2963 case HCI_AUTO_CONN_ALWAYS:
2964 			list_del_init(&params->action);
2965 			list_add(&params->action, &hdev->pend_le_conns);
2966 hci_update_background_scan(hdev);
2967 break;
2968
2969 default:
2970 break;
2971 }
2972 }
2973
2974 hci_disconn_cfm(conn, ev->reason);
2975
2976 /* The suspend notifier is waiting for all devices to disconnect so
2977 * clear the bit from pending tasks and inform the wait queue.
2978 */
2979 if (list_empty(&hdev->conn_hash.list) &&
2980 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2981 wake_up(&hdev->suspend_wait_q);
2982 }
2983
2984 /* Re-enable advertising if necessary, since it might
2985 * have been disabled by the connection. From the
2986 * HCI_LE_Set_Advertise_Enable command description in
2987 * the core specification (v4.0):
2988 * "The Controller shall continue advertising until the Host
2989 * issues an LE_Set_Advertise_Enable command with
2990 * Advertising_Enable set to 0x00 (Advertising is disabled)
2991 * or until a connection is created or until the Advertising
2992 * is timed out due to Directed Advertising."
2993 */
2994 if (conn->type == LE_LINK) {
2995 hdev->cur_adv_instance = conn->adv_instance;
2996 hci_req_reenable_advertising(hdev);
2997 }
2998
2999 hci_conn_del(conn);
3000
3001 unlock:
3002 hci_dev_unlock(hdev);
3003 }
3004
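/* On successful authentication of an SSP link still in BT_CONFIG,
 * encryption is requested before the connection is reported as
 * established; non-SSP links are completed immediately.
 */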
3005 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3006 {
3007 struct hci_ev_auth_complete *ev = (void *) skb->data;
3008 struct hci_conn *conn;
3009
3010 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3011
3012 hci_dev_lock(hdev);
3013
3014 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3015 if (!conn)
3016 goto unlock;
3017
3018 if (!ev->status) {
3019 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3020 set_bit(HCI_CONN_AUTH, &conn->flags);
3021 conn->sec_level = conn->pending_sec_level;
3022 } else {
3023 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3024 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3025
3026 mgmt_auth_failed(conn, ev->status);
3027 }
3028
3029 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3030
3031 if (conn->state == BT_CONFIG) {
3032 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3033 struct hci_cp_set_conn_encrypt cp;
3034 cp.handle = ev->handle;
3035 cp.encrypt = 0x01;
3036 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3037 &cp);
3038 } else {
3039 conn->state = BT_CONNECTED;
3040 hci_connect_cfm(conn, ev->status);
3041 hci_conn_drop(conn);
3042 }
3043 } else {
3044 hci_auth_cfm(conn, ev->status);
3045
3046 hci_conn_hold(conn);
3047 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3048 hci_conn_drop(conn);
3049 }
3050
3051 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3052 if (!ev->status) {
3053 struct hci_cp_set_conn_encrypt cp;
3054 cp.handle = ev->handle;
3055 cp.encrypt = 0x01;
3056 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3057 &cp);
3058 } else {
3059 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3060 hci_encrypt_cfm(conn, ev->status);
3061 }
3062 }
3063
3064 unlock:
3065 hci_dev_unlock(hdev);
3066 }
3067
3068 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3069 {
3070 struct hci_ev_remote_name *ev = (void *) skb->data;
3071 struct hci_conn *conn;
3072
3073 BT_DBG("%s", hdev->name);
3074
3075 hci_conn_check_pending(hdev);
3076
3077 hci_dev_lock(hdev);
3078
3079 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3080
3081 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3082 goto check_auth;
3083
3084 if (ev->status == 0)
3085 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3086 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3087 else
3088 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3089
3090 check_auth:
3091 if (!conn)
3092 goto unlock;
3093
3094 if (!hci_outgoing_auth_needed(hdev, conn))
3095 goto unlock;
3096
3097 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3098 struct hci_cp_auth_requested cp;
3099
3100 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3101
3102 cp.handle = __cpu_to_le16(conn->handle);
3103 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3104 }
3105
3106 unlock:
3107 hci_dev_unlock(hdev);
3108 }
3109
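/* Completion handler for HCI_Read_Encryption_Key_Size. The resulting
 * key size is stored on the connection so upper layers can enforce a
 * minimum; treating a failed read as key size 0 forces a disconnect
 * rather than trusting an unknown key length.
 */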
3110 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3111 u16 opcode, struct sk_buff *skb)
3112 {
3113 const struct hci_rp_read_enc_key_size *rp;
3114 struct hci_conn *conn;
3115 u16 handle;
3116
3117 BT_DBG("%s status 0x%02x", hdev->name, status);
3118
3119 if (!skb || skb->len < sizeof(*rp)) {
3120 bt_dev_err(hdev, "invalid read key size response");
3121 return;
3122 }
3123
3124 rp = (void *)skb->data;
3125 handle = le16_to_cpu(rp->handle);
3126
3127 hci_dev_lock(hdev);
3128
3129 conn = hci_conn_hash_lookup_handle(hdev, handle);
3130 if (!conn)
3131 goto unlock;
3132
3133 /* While unexpected, the read_enc_key_size command may fail. The most
3134 * secure approach is to then assume the key size is 0 to force a
3135 * disconnection.
3136 */
3137 if (rp->status) {
3138 bt_dev_err(hdev, "failed to read key size for handle %u",
3139 handle);
3140 conn->enc_key_size = 0;
3141 } else {
3142 conn->enc_key_size = rp->key_size;
3143 }
3144
3145 hci_encrypt_cfm(conn, 0);
3146
3147 unlock:
3148 hci_dev_unlock(hdev);
3149 }
3150
3151 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3152 {
3153 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3154 struct hci_conn *conn;
3155
3156 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3157
3158 hci_dev_lock(hdev);
3159
3160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3161 if (!conn)
3162 goto unlock;
3163
3164 if (!ev->status) {
3165 if (ev->encrypt) {
3166 /* Encryption implies authentication */
3167 set_bit(HCI_CONN_AUTH, &conn->flags);
3168 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3169 conn->sec_level = conn->pending_sec_level;
3170
3171 /* P-256 authentication key implies FIPS */
3172 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3173 set_bit(HCI_CONN_FIPS, &conn->flags);
3174
3175 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3176 conn->type == LE_LINK)
3177 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3178 } else {
3179 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3180 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3181 }
3182 }
3183
3184 /* We should disregard the current RPA and generate a new one
3185 * whenever the encryption procedure fails.
3186 */
3187 if (ev->status && conn->type == LE_LINK) {
3188 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3189 hci_adv_instances_set_rpa_expired(hdev, true);
3190 }
3191
3192 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3193
3194 /* Check link security requirements are met */
3195 if (!hci_conn_check_link_mode(conn))
3196 ev->status = HCI_ERROR_AUTH_FAILURE;
3197
3198 if (ev->status && conn->state == BT_CONNECTED) {
3199 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3200 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3201
3202 /* Notify upper layers so they can cleanup before
3203 * disconnecting.
3204 */
3205 hci_encrypt_cfm(conn, ev->status);
3206 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3207 hci_conn_drop(conn);
3208 goto unlock;
3209 }
3210
3211 /* Try reading the encryption key size for encrypted ACL links */
3212 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3213 struct hci_cp_read_enc_key_size cp;
3214 struct hci_request req;
3215
3216 /* Only send HCI_Read_Encryption_Key_Size if the
3217 * controller really supports it. If it doesn't, assume
3218 * the default size (16).
3219 */
3220 if (!(hdev->commands[20] & 0x10)) {
3221 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3222 goto notify;
3223 }
3224
3225 hci_req_init(&req, hdev);
3226
3227 cp.handle = cpu_to_le16(conn->handle);
3228 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3229
3230 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3231 bt_dev_err(hdev, "sending read key size failed");
3232 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3233 goto notify;
3234 }
3235
3236 goto unlock;
3237 }
3238
3239 	/* Set the default Authenticated Payload Timeout after
3240 	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B
3241 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3242 	 * sent when the link is active and encryption is enabled. The conn
3243 	 * type can be either LE or ACL, the controller must support LMP
3244 	 * Ping (or LE Ping for LE links), and AES-CCM encryption must be
3245 	 * in use.
3245 	 */
3246 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3247 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3248 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3249 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3250 struct hci_cp_write_auth_payload_to cp;
3251
3252 cp.handle = cpu_to_le16(conn->handle);
3253 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3254 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3255 sizeof(cp), &cp);
3256 }
3257
3258 notify:
3259 hci_encrypt_cfm(conn, ev->status);
3260
3261 unlock:
3262 hci_dev_unlock(hdev);
3263 }
3264
3265 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3266 struct sk_buff *skb)
3267 {
3268 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3269 struct hci_conn *conn;
3270
3271 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3272
3273 hci_dev_lock(hdev);
3274
3275 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3276 if (conn) {
3277 if (!ev->status)
3278 set_bit(HCI_CONN_SECURE, &conn->flags);
3279
3280 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3281
3282 hci_key_change_cfm(conn, ev->status);
3283 }
3284
3285 hci_dev_unlock(hdev);
3286 }
3287
3288 static void hci_remote_features_evt(struct hci_dev *hdev,
3289 struct sk_buff *skb)
3290 {
3291 struct hci_ev_remote_features *ev = (void *) skb->data;
3292 struct hci_conn *conn;
3293
3294 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3295
3296 hci_dev_lock(hdev);
3297
3298 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3299 if (!conn)
3300 goto unlock;
3301
3302 if (!ev->status)
3303 memcpy(conn->features[0], ev->features, 8);
3304
3305 if (conn->state != BT_CONFIG)
3306 goto unlock;
3307
3308 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3309 lmp_ext_feat_capable(conn)) {
3310 struct hci_cp_read_remote_ext_features cp;
3311 cp.handle = ev->handle;
3312 cp.page = 0x01;
3313 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3314 sizeof(cp), &cp);
3315 goto unlock;
3316 }
3317
3318 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3319 struct hci_cp_remote_name_req cp;
3320 memset(&cp, 0, sizeof(cp));
3321 bacpy(&cp.bdaddr, &conn->dst);
3322 cp.pscan_rep_mode = 0x02;
3323 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3324 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3325 mgmt_device_connected(hdev, conn, NULL, 0);
3326
3327 if (!hci_outgoing_auth_needed(hdev, conn)) {
3328 conn->state = BT_CONNECTED;
3329 hci_connect_cfm(conn, ev->status);
3330 hci_conn_drop(conn);
3331 }
3332
3333 unlock:
3334 hci_dev_unlock(hdev);
3335 }
3336
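/* ncmd is the Num_HCI_Command_Packets field from the event. The core
 * serializes commands, so a non-zero value simply re-arms cmd_cnt to 1;
 * a zero value means the controller cannot accept commands right now,
 * and the ncmd timer acts as a watchdog against it never recovering.
 */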
3337 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3338 {
3339 cancel_delayed_work(&hdev->cmd_timer);
3340
3341 if (!test_bit(HCI_RESET, &hdev->flags)) {
3342 if (ncmd) {
3343 cancel_delayed_work(&hdev->ncmd_timer);
3344 atomic_set(&hdev->cmd_cnt, 1);
3345 } else {
3346 schedule_delayed_work(&hdev->ncmd_timer,
3347 HCI_NCMD_TIMEOUT);
3348 }
3349 }
3350 }
3351
3352 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3353 u16 *opcode, u8 *status,
3354 hci_req_complete_t *req_complete,
3355 hci_req_complete_skb_t *req_complete_skb)
3356 {
3357 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3358
3359 *opcode = __le16_to_cpu(ev->opcode);
3360 *status = skb->data[sizeof(*ev)];
3361
3362 skb_pull(skb, sizeof(*ev));
3363
3364 switch (*opcode) {
3365 case HCI_OP_INQUIRY_CANCEL:
3366 hci_cc_inquiry_cancel(hdev, skb, status);
3367 break;
3368
3369 case HCI_OP_PERIODIC_INQ:
3370 hci_cc_periodic_inq(hdev, skb);
3371 break;
3372
3373 case HCI_OP_EXIT_PERIODIC_INQ:
3374 hci_cc_exit_periodic_inq(hdev, skb);
3375 break;
3376
3377 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3378 hci_cc_remote_name_req_cancel(hdev, skb);
3379 break;
3380
3381 case HCI_OP_ROLE_DISCOVERY:
3382 hci_cc_role_discovery(hdev, skb);
3383 break;
3384
3385 case HCI_OP_READ_LINK_POLICY:
3386 hci_cc_read_link_policy(hdev, skb);
3387 break;
3388
3389 case HCI_OP_WRITE_LINK_POLICY:
3390 hci_cc_write_link_policy(hdev, skb);
3391 break;
3392
3393 case HCI_OP_READ_DEF_LINK_POLICY:
3394 hci_cc_read_def_link_policy(hdev, skb);
3395 break;
3396
3397 case HCI_OP_WRITE_DEF_LINK_POLICY:
3398 hci_cc_write_def_link_policy(hdev, skb);
3399 break;
3400
3401 case HCI_OP_RESET:
3402 hci_cc_reset(hdev, skb);
3403 break;
3404
3405 case HCI_OP_READ_STORED_LINK_KEY:
3406 hci_cc_read_stored_link_key(hdev, skb);
3407 break;
3408
3409 case HCI_OP_DELETE_STORED_LINK_KEY:
3410 hci_cc_delete_stored_link_key(hdev, skb);
3411 break;
3412
3413 case HCI_OP_WRITE_LOCAL_NAME:
3414 hci_cc_write_local_name(hdev, skb);
3415 break;
3416
3417 case HCI_OP_READ_LOCAL_NAME:
3418 hci_cc_read_local_name(hdev, skb);
3419 break;
3420
3421 case HCI_OP_WRITE_AUTH_ENABLE:
3422 hci_cc_write_auth_enable(hdev, skb);
3423 break;
3424
3425 case HCI_OP_WRITE_ENCRYPT_MODE:
3426 hci_cc_write_encrypt_mode(hdev, skb);
3427 break;
3428
3429 case HCI_OP_WRITE_SCAN_ENABLE:
3430 hci_cc_write_scan_enable(hdev, skb);
3431 break;
3432
3433 case HCI_OP_SET_EVENT_FLT:
3434 hci_cc_set_event_filter(hdev, skb);
3435 break;
3436
3437 case HCI_OP_READ_CLASS_OF_DEV:
3438 hci_cc_read_class_of_dev(hdev, skb);
3439 break;
3440
3441 case HCI_OP_WRITE_CLASS_OF_DEV:
3442 hci_cc_write_class_of_dev(hdev, skb);
3443 break;
3444
3445 case HCI_OP_READ_VOICE_SETTING:
3446 hci_cc_read_voice_setting(hdev, skb);
3447 break;
3448
3449 case HCI_OP_WRITE_VOICE_SETTING:
3450 hci_cc_write_voice_setting(hdev, skb);
3451 break;
3452
3453 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3454 hci_cc_read_num_supported_iac(hdev, skb);
3455 break;
3456
3457 case HCI_OP_WRITE_SSP_MODE:
3458 hci_cc_write_ssp_mode(hdev, skb);
3459 break;
3460
3461 case HCI_OP_WRITE_SC_SUPPORT:
3462 hci_cc_write_sc_support(hdev, skb);
3463 break;
3464
3465 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3466 hci_cc_read_auth_payload_timeout(hdev, skb);
3467 break;
3468
3469 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3470 hci_cc_write_auth_payload_timeout(hdev, skb);
3471 break;
3472
3473 case HCI_OP_READ_LOCAL_VERSION:
3474 hci_cc_read_local_version(hdev, skb);
3475 break;
3476
3477 case HCI_OP_READ_LOCAL_COMMANDS:
3478 hci_cc_read_local_commands(hdev, skb);
3479 break;
3480
3481 case HCI_OP_READ_LOCAL_FEATURES:
3482 hci_cc_read_local_features(hdev, skb);
3483 break;
3484
3485 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3486 hci_cc_read_local_ext_features(hdev, skb);
3487 break;
3488
3489 case HCI_OP_READ_BUFFER_SIZE:
3490 hci_cc_read_buffer_size(hdev, skb);
3491 break;
3492
3493 case HCI_OP_READ_BD_ADDR:
3494 hci_cc_read_bd_addr(hdev, skb);
3495 break;
3496
3497 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3498 hci_cc_read_local_pairing_opts(hdev, skb);
3499 break;
3500
3501 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3502 hci_cc_read_page_scan_activity(hdev, skb);
3503 break;
3504
3505 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3506 hci_cc_write_page_scan_activity(hdev, skb);
3507 break;
3508
3509 case HCI_OP_READ_PAGE_SCAN_TYPE:
3510 hci_cc_read_page_scan_type(hdev, skb);
3511 break;
3512
3513 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3514 hci_cc_write_page_scan_type(hdev, skb);
3515 break;
3516
3517 case HCI_OP_READ_DATA_BLOCK_SIZE:
3518 hci_cc_read_data_block_size(hdev, skb);
3519 break;
3520
3521 case HCI_OP_READ_FLOW_CONTROL_MODE:
3522 hci_cc_read_flow_control_mode(hdev, skb);
3523 break;
3524
3525 case HCI_OP_READ_LOCAL_AMP_INFO:
3526 hci_cc_read_local_amp_info(hdev, skb);
3527 break;
3528
3529 case HCI_OP_READ_CLOCK:
3530 hci_cc_read_clock(hdev, skb);
3531 break;
3532
3533 case HCI_OP_READ_INQ_RSP_TX_POWER:
3534 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3535 break;
3536
3537 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3538 hci_cc_read_def_err_data_reporting(hdev, skb);
3539 break;
3540
3541 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3542 hci_cc_write_def_err_data_reporting(hdev, skb);
3543 break;
3544
3545 case HCI_OP_PIN_CODE_REPLY:
3546 hci_cc_pin_code_reply(hdev, skb);
3547 break;
3548
3549 case HCI_OP_PIN_CODE_NEG_REPLY:
3550 hci_cc_pin_code_neg_reply(hdev, skb);
3551 break;
3552
3553 case HCI_OP_READ_LOCAL_OOB_DATA:
3554 hci_cc_read_local_oob_data(hdev, skb);
3555 break;
3556
3557 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3558 hci_cc_read_local_oob_ext_data(hdev, skb);
3559 break;
3560
3561 case HCI_OP_LE_READ_BUFFER_SIZE:
3562 hci_cc_le_read_buffer_size(hdev, skb);
3563 break;
3564
3565 case HCI_OP_LE_READ_LOCAL_FEATURES:
3566 hci_cc_le_read_local_features(hdev, skb);
3567 break;
3568
3569 case HCI_OP_LE_READ_ADV_TX_POWER:
3570 hci_cc_le_read_adv_tx_power(hdev, skb);
3571 break;
3572
3573 case HCI_OP_USER_CONFIRM_REPLY:
3574 hci_cc_user_confirm_reply(hdev, skb);
3575 break;
3576
3577 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3578 hci_cc_user_confirm_neg_reply(hdev, skb);
3579 break;
3580
3581 case HCI_OP_USER_PASSKEY_REPLY:
3582 hci_cc_user_passkey_reply(hdev, skb);
3583 break;
3584
3585 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3586 hci_cc_user_passkey_neg_reply(hdev, skb);
3587 break;
3588
3589 case HCI_OP_LE_SET_RANDOM_ADDR:
3590 hci_cc_le_set_random_addr(hdev, skb);
3591 break;
3592
3593 case HCI_OP_LE_SET_ADV_ENABLE:
3594 hci_cc_le_set_adv_enable(hdev, skb);
3595 break;
3596
3597 case HCI_OP_LE_SET_SCAN_PARAM:
3598 hci_cc_le_set_scan_param(hdev, skb);
3599 break;
3600
3601 case HCI_OP_LE_SET_SCAN_ENABLE:
3602 hci_cc_le_set_scan_enable(hdev, skb);
3603 break;
3604
3605 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3606 hci_cc_le_read_accept_list_size(hdev, skb);
3607 break;
3608
3609 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3610 hci_cc_le_clear_accept_list(hdev, skb);
3611 break;
3612
3613 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3614 hci_cc_le_add_to_accept_list(hdev, skb);
3615 break;
3616
3617 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3618 hci_cc_le_del_from_accept_list(hdev, skb);
3619 break;
3620
3621 case HCI_OP_LE_READ_SUPPORTED_STATES:
3622 hci_cc_le_read_supported_states(hdev, skb);
3623 break;
3624
3625 case HCI_OP_LE_READ_DEF_DATA_LEN:
3626 hci_cc_le_read_def_data_len(hdev, skb);
3627 break;
3628
3629 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3630 hci_cc_le_write_def_data_len(hdev, skb);
3631 break;
3632
3633 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3634 hci_cc_le_add_to_resolv_list(hdev, skb);
3635 break;
3636
3637 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3638 hci_cc_le_del_from_resolv_list(hdev, skb);
3639 break;
3640
3641 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3642 hci_cc_le_clear_resolv_list(hdev, skb);
3643 break;
3644
3645 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3646 hci_cc_le_read_resolv_list_size(hdev, skb);
3647 break;
3648
3649 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3650 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3651 break;
3652
3653 case HCI_OP_LE_READ_MAX_DATA_LEN:
3654 hci_cc_le_read_max_data_len(hdev, skb);
3655 break;
3656
3657 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3658 hci_cc_write_le_host_supported(hdev, skb);
3659 break;
3660
3661 case HCI_OP_LE_SET_ADV_PARAM:
3662 hci_cc_set_adv_param(hdev, skb);
3663 break;
3664
3665 case HCI_OP_READ_RSSI:
3666 hci_cc_read_rssi(hdev, skb);
3667 break;
3668
3669 case HCI_OP_READ_TX_POWER:
3670 hci_cc_read_tx_power(hdev, skb);
3671 break;
3672
3673 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3674 hci_cc_write_ssp_debug_mode(hdev, skb);
3675 break;
3676
3677 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3678 hci_cc_le_set_ext_scan_param(hdev, skb);
3679 break;
3680
3681 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3682 hci_cc_le_set_ext_scan_enable(hdev, skb);
3683 break;
3684
3685 case HCI_OP_LE_SET_DEFAULT_PHY:
3686 hci_cc_le_set_default_phy(hdev, skb);
3687 break;
3688
3689 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3690 hci_cc_le_read_num_adv_sets(hdev, skb);
3691 break;
3692
3693 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3694 hci_cc_set_ext_adv_param(hdev, skb);
3695 break;
3696
3697 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3698 hci_cc_le_set_ext_adv_enable(hdev, skb);
3699 break;
3700
3701 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3702 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3703 break;
3704
3705 case HCI_OP_LE_READ_TRANSMIT_POWER:
3706 hci_cc_le_read_transmit_power(hdev, skb);
3707 break;
3708
3709 default:
3710 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3711 break;
3712 }
3713
3714 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3715
3716 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3717 req_complete_skb);
3718
3719 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3720 bt_dev_err(hdev,
3721 "unexpected event for opcode 0x%4.4x", *opcode);
3722 return;
3723 }
3724
3725 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3726 queue_work(hdev->workqueue, &hdev->cmd_work);
3727 }
3728
3729 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3730 u16 *opcode, u8 *status,
3731 hci_req_complete_t *req_complete,
3732 hci_req_complete_skb_t *req_complete_skb)
3733 {
3734 struct hci_ev_cmd_status *ev = (void *) skb->data;
3735
3736 skb_pull(skb, sizeof(*ev));
3737
3738 *opcode = __le16_to_cpu(ev->opcode);
3739 *status = ev->status;
3740
3741 switch (*opcode) {
3742 case HCI_OP_INQUIRY:
3743 hci_cs_inquiry(hdev, ev->status);
3744 break;
3745
3746 case HCI_OP_CREATE_CONN:
3747 hci_cs_create_conn(hdev, ev->status);
3748 break;
3749
3750 case HCI_OP_DISCONNECT:
3751 hci_cs_disconnect(hdev, ev->status);
3752 break;
3753
3754 case HCI_OP_ADD_SCO:
3755 hci_cs_add_sco(hdev, ev->status);
3756 break;
3757
3758 case HCI_OP_AUTH_REQUESTED:
3759 hci_cs_auth_requested(hdev, ev->status);
3760 break;
3761
3762 case HCI_OP_SET_CONN_ENCRYPT:
3763 hci_cs_set_conn_encrypt(hdev, ev->status);
3764 break;
3765
3766 case HCI_OP_REMOTE_NAME_REQ:
3767 hci_cs_remote_name_req(hdev, ev->status);
3768 break;
3769
3770 case HCI_OP_READ_REMOTE_FEATURES:
3771 hci_cs_read_remote_features(hdev, ev->status);
3772 break;
3773
3774 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3775 hci_cs_read_remote_ext_features(hdev, ev->status);
3776 break;
3777
3778 case HCI_OP_SETUP_SYNC_CONN:
3779 hci_cs_setup_sync_conn(hdev, ev->status);
3780 break;
3781
3782 case HCI_OP_SNIFF_MODE:
3783 hci_cs_sniff_mode(hdev, ev->status);
3784 break;
3785
3786 case HCI_OP_EXIT_SNIFF_MODE:
3787 hci_cs_exit_sniff_mode(hdev, ev->status);
3788 break;
3789
3790 case HCI_OP_SWITCH_ROLE:
3791 hci_cs_switch_role(hdev, ev->status);
3792 break;
3793
3794 case HCI_OP_LE_CREATE_CONN:
3795 hci_cs_le_create_conn(hdev, ev->status);
3796 break;
3797
3798 case HCI_OP_LE_READ_REMOTE_FEATURES:
3799 hci_cs_le_read_remote_features(hdev, ev->status);
3800 break;
3801
3802 case HCI_OP_LE_START_ENC:
3803 hci_cs_le_start_enc(hdev, ev->status);
3804 break;
3805
3806 case HCI_OP_LE_EXT_CREATE_CONN:
3807 hci_cs_le_ext_create_conn(hdev, ev->status);
3808 break;
3809
3810 default:
3811 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3812 break;
3813 }
3814
3815 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3816
3817 	/* Indicate request completion if the command failed. Also, if
3818 	 * we're not waiting for a special event and we get a success
3819 	 * command status, we should try to flag the request as completed
3820 	 * (since for this kind of command there will not be a command
3821 	 * complete event).
3822 	 */
3823 if (ev->status ||
3824 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3825 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3826 req_complete_skb);
3827
3828 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3829 bt_dev_err(hdev,
3830 "unexpected event for opcode 0x%4.4x", *opcode);
3831 return;
3832 }
3833
3834 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3835 queue_work(hdev->workqueue, &hdev->cmd_work);
3836 }
3837
3838 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3839 {
3840 struct hci_ev_hardware_error *ev = (void *) skb->data;
3841
3842 hdev->hw_error_code = ev->code;
3843
3844 queue_work(hdev->req_workqueue, &hdev->error_reset);
3845 }
3846
3847 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3848 {
3849 struct hci_ev_role_change *ev = (void *) skb->data;
3850 struct hci_conn *conn;
3851
3852 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3853
3854 hci_dev_lock(hdev);
3855
3856 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3857 if (conn) {
3858 if (!ev->status)
3859 conn->role = ev->role;
3860
3861 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3862
3863 hci_role_switch_cfm(conn, ev->status, ev->role);
3864 }
3865
3866 hci_dev_unlock(hdev);
3867 }
3868
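/* Packet-based flow control: each entry returns transmit credits for
 * one connection handle. The per-type counters are clamped to the
 * buffer counts reported by the controller, and LE traffic is
 * accounted against the ACL pool when the controller reports no
 * dedicated LE buffers (le_pkts == 0).
 */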
3869 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3870 {
3871 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3872 int i;
3873
3874 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3875 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3876 return;
3877 }
3878
3879 if (skb->len < sizeof(*ev) ||
3880 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3881 BT_DBG("%s bad parameters", hdev->name);
3882 return;
3883 }
3884
3885 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3886
3887 for (i = 0; i < ev->num_hndl; i++) {
3888 struct hci_comp_pkts_info *info = &ev->handles[i];
3889 struct hci_conn *conn;
3890 __u16 handle, count;
3891
3892 handle = __le16_to_cpu(info->handle);
3893 count = __le16_to_cpu(info->count);
3894
3895 conn = hci_conn_hash_lookup_handle(hdev, handle);
3896 if (!conn)
3897 continue;
3898
3899 conn->sent -= count;
3900
3901 switch (conn->type) {
3902 case ACL_LINK:
3903 hdev->acl_cnt += count;
3904 if (hdev->acl_cnt > hdev->acl_pkts)
3905 hdev->acl_cnt = hdev->acl_pkts;
3906 break;
3907
3908 case LE_LINK:
3909 if (hdev->le_pkts) {
3910 hdev->le_cnt += count;
3911 if (hdev->le_cnt > hdev->le_pkts)
3912 hdev->le_cnt = hdev->le_pkts;
3913 } else {
3914 hdev->acl_cnt += count;
3915 if (hdev->acl_cnt > hdev->acl_pkts)
3916 hdev->acl_cnt = hdev->acl_pkts;
3917 }
3918 break;
3919
3920 case SCO_LINK:
3921 hdev->sco_cnt += count;
3922 if (hdev->sco_cnt > hdev->sco_pkts)
3923 hdev->sco_cnt = hdev->sco_pkts;
3924 break;
3925
3926 default:
3927 bt_dev_err(hdev, "unknown type %d conn %p",
3928 conn->type, conn);
3929 break;
3930 }
3931 }
3932
3933 queue_work(hdev->workqueue, &hdev->tx_work);
3934 }
3935
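/* For the block-based flow control event below, a handle on an AMP
 * controller identifies a logical link (hci_chan) rather than an ACL
 * connection, so the lookup resolves the channel first and returns
 * its parent hci_conn.
 */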
3936 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3937 __u16 handle)
3938 {
3939 struct hci_chan *chan;
3940
3941 switch (hdev->dev_type) {
3942 case HCI_PRIMARY:
3943 return hci_conn_hash_lookup_handle(hdev, handle);
3944 case HCI_AMP:
3945 chan = hci_chan_lookup_handle(hdev, handle);
3946 if (chan)
3947 return chan->conn;
3948 break;
3949 default:
3950 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3951 break;
3952 }
3953
3954 return NULL;
3955 }
3956
3957 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3958 {
3959 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3960 int i;
3961
3962 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3963 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3964 return;
3965 }
3966
3967 if (skb->len < sizeof(*ev) ||
3968 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3969 BT_DBG("%s bad parameters", hdev->name);
3970 return;
3971 }
3972
3973 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3974 ev->num_hndl);
3975
3976 for (i = 0; i < ev->num_hndl; i++) {
3977 struct hci_comp_blocks_info *info = &ev->handles[i];
3978 struct hci_conn *conn = NULL;
3979 __u16 handle, block_count;
3980
3981 handle = __le16_to_cpu(info->handle);
3982 block_count = __le16_to_cpu(info->blocks);
3983
3984 conn = __hci_conn_lookup_handle(hdev, handle);
3985 if (!conn)
3986 continue;
3987
3988 conn->sent -= block_count;
3989
3990 switch (conn->type) {
3991 case ACL_LINK:
3992 case AMP_LINK:
3993 hdev->block_cnt += block_count;
3994 if (hdev->block_cnt > hdev->num_blocks)
3995 hdev->block_cnt = hdev->num_blocks;
3996 break;
3997
3998 default:
3999 bt_dev_err(hdev, "unknown type %d conn %p",
4000 conn->type, conn);
4001 break;
4002 }
4003 }
4004
4005 queue_work(hdev->workqueue, &hdev->tx_work);
4006 }
4007
4008 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4009 {
4010 struct hci_ev_mode_change *ev = (void *) skb->data;
4011 struct hci_conn *conn;
4012
4013 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4014
4015 hci_dev_lock(hdev);
4016
4017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4018 if (conn) {
4019 conn->mode = ev->mode;
4020
4021 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4022 &conn->flags)) {
4023 if (conn->mode == HCI_CM_ACTIVE)
4024 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4025 else
4026 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4027 }
4028
4029 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4030 hci_sco_setup(conn, ev->status);
4031 }
4032
4033 hci_dev_unlock(hdev);
4034 }
4035
4036 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4037 {
4038 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4039 struct hci_conn *conn;
4040
4041 BT_DBG("%s", hdev->name);
4042
4043 hci_dev_lock(hdev);
4044
4045 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4046 if (!conn)
4047 goto unlock;
4048
4049 if (conn->state == BT_CONNECTED) {
4050 hci_conn_hold(conn);
4051 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4052 hci_conn_drop(conn);
4053 }
4054
4055 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4056 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4057 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4058 sizeof(ev->bdaddr), &ev->bdaddr);
4059 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4060 u8 secure;
4061
4062 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4063 secure = 1;
4064 else
4065 secure = 0;
4066
4067 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4068 }
4069
4070 unlock:
4071 hci_dev_unlock(hdev);
4072 }
4073
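/* Derive the pending security level from the link key type:
 *
 *   unauthenticated combination (P-192/P-256) -> BT_SECURITY_MEDIUM
 *   legacy combination, 16-digit PIN          -> BT_SECURITY_HIGH
 *   legacy combination, shorter PIN           -> BT_SECURITY_MEDIUM
 *   authenticated combination P-192           -> BT_SECURITY_HIGH
 *   authenticated combination P-256           -> BT_SECURITY_FIPS
 *
 * Unit and debug keys keep the current level, and changed combination
 * keys are left alone entirely since the underlying type does not
 * change.
 */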
4074 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4075 {
4076 if (key_type == HCI_LK_CHANGED_COMBINATION)
4077 return;
4078
4079 conn->pin_length = pin_len;
4080 conn->key_type = key_type;
4081
4082 switch (key_type) {
4083 case HCI_LK_LOCAL_UNIT:
4084 case HCI_LK_REMOTE_UNIT:
4085 case HCI_LK_DEBUG_COMBINATION:
4086 return;
4087 case HCI_LK_COMBINATION:
4088 if (pin_len == 16)
4089 conn->pending_sec_level = BT_SECURITY_HIGH;
4090 else
4091 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4092 break;
4093 case HCI_LK_UNAUTH_COMBINATION_P192:
4094 case HCI_LK_UNAUTH_COMBINATION_P256:
4095 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4096 break;
4097 case HCI_LK_AUTH_COMBINATION_P192:
4098 conn->pending_sec_level = BT_SECURITY_HIGH;
4099 break;
4100 case HCI_LK_AUTH_COMBINATION_P256:
4101 conn->pending_sec_level = BT_SECURITY_FIPS;
4102 break;
4103 }
4104 }
4105
4106 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4107 {
4108 struct hci_ev_link_key_req *ev = (void *) skb->data;
4109 struct hci_cp_link_key_reply cp;
4110 struct hci_conn *conn;
4111 struct link_key *key;
4112
4113 BT_DBG("%s", hdev->name);
4114
4115 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4116 return;
4117
4118 hci_dev_lock(hdev);
4119
4120 key = hci_find_link_key(hdev, &ev->bdaddr);
4121 if (!key) {
4122 BT_DBG("%s link key not found for %pMR", hdev->name,
4123 &ev->bdaddr);
4124 goto not_found;
4125 }
4126
4127 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4128 &ev->bdaddr);
4129
4130 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4131 if (conn) {
4132 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4133
4134 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4135 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4136 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4137 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4138 goto not_found;
4139 }
4140
4141 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4142 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4143 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4144 BT_DBG("%s ignoring key unauthenticated for high security",
4145 hdev->name);
4146 goto not_found;
4147 }
4148
4149 conn_set_key(conn, key->type, key->pin_len);
4150 }
4151
4152 bacpy(&cp.bdaddr, &ev->bdaddr);
4153 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4154
4155 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4156
4157 hci_dev_unlock(hdev);
4158
4159 return;
4160
4161 not_found:
4162 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4163 hci_dev_unlock(hdev);
4164 }
4165
4166 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4167 {
4168 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4169 struct hci_conn *conn;
4170 struct link_key *key;
4171 bool persistent;
4172 u8 pin_len = 0;
4173
4174 BT_DBG("%s", hdev->name);
4175
4176 hci_dev_lock(hdev);
4177
4178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4179 if (!conn)
4180 goto unlock;
4181
4182 /* Ignore NULL link keys to guard against CVE-2020-26555 */
4183 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4184 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4185 &ev->bdaddr);
4186 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4187 hci_conn_drop(conn);
4188 goto unlock;
4189 }
4190
4191 hci_conn_hold(conn);
4192 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4193 hci_conn_drop(conn);
4194
4195 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4196 conn_set_key(conn, ev->key_type, conn->pin_length);
4197
4198 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4199 goto unlock;
4200
4201 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4202 ev->key_type, pin_len, &persistent);
4203 if (!key)
4204 goto unlock;
4205
4206 /* Update connection information since adding the key will have
4207 * fixed up the type in the case of changed combination keys.
4208 */
4209 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4210 conn_set_key(conn, key->type, key->pin_len);
4211
4212 mgmt_new_link_key(hdev, key, persistent);
4213
4214 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4215 * is set. If it's not set simply remove the key from the kernel
4216 * list (we've still notified user space about it but with
4217 * store_hint being 0).
4218 */
4219 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4220 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4221 list_del_rcu(&key->list);
4222 kfree_rcu(key, rcu);
4223 goto unlock;
4224 }
4225
4226 if (persistent)
4227 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4228 else
4229 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4230
4231 unlock:
4232 hci_dev_unlock(hdev);
4233 }
4234
4235 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4236 {
4237 struct hci_ev_clock_offset *ev = (void *) skb->data;
4238 struct hci_conn *conn;
4239
4240 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4241
4242 hci_dev_lock(hdev);
4243
4244 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4245 if (conn && !ev->status) {
4246 struct inquiry_entry *ie;
4247
4248 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4249 if (ie) {
4250 ie->data.clock_offset = ev->clock_offset;
4251 ie->timestamp = jiffies;
4252 }
4253 }
4254
4255 hci_dev_unlock(hdev);
4256 }
4257
4258 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4259 {
4260 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4261 struct hci_conn *conn;
4262
4263 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4264
4265 hci_dev_lock(hdev);
4266
4267 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4268 if (conn && !ev->status)
4269 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4270
4271 hci_dev_unlock(hdev);
4272 }
4273
4274 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4275 {
4276 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4277 struct inquiry_entry *ie;
4278
4279 BT_DBG("%s", hdev->name);
4280
4281 hci_dev_lock(hdev);
4282
4283 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4284 if (ie) {
4285 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4286 ie->timestamp = jiffies;
4287 }
4288
4289 hci_dev_unlock(hdev);
4290 }
4291
4292 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4293 struct sk_buff *skb)
4294 {
4295 struct inquiry_data data;
4296 int num_rsp = *((__u8 *) skb->data);
4297
4298 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4299
4300 if (!num_rsp)
4301 return;
4302
4303 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4304 return;
4305
4306 hci_dev_lock(hdev);
4307
4308 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4309 struct inquiry_info_with_rssi_and_pscan_mode *info;
4310 info = (void *) (skb->data + 1);
4311
4312 if (skb->len < num_rsp * sizeof(*info) + 1)
4313 goto unlock;
4314
4315 for (; num_rsp; num_rsp--, info++) {
4316 u32 flags;
4317
4318 bacpy(&data.bdaddr, &info->bdaddr);
4319 data.pscan_rep_mode = info->pscan_rep_mode;
4320 data.pscan_period_mode = info->pscan_period_mode;
4321 data.pscan_mode = info->pscan_mode;
4322 memcpy(data.dev_class, info->dev_class, 3);
4323 data.clock_offset = info->clock_offset;
4324 data.rssi = info->rssi;
4325 data.ssp_mode = 0x00;
4326
4327 flags = hci_inquiry_cache_update(hdev, &data, false);
4328
4329 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4330 info->dev_class, info->rssi,
4331 flags, NULL, 0, NULL, 0);
4332 }
4333 } else {
4334 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4335
4336 if (skb->len < num_rsp * sizeof(*info) + 1)
4337 goto unlock;
4338
4339 for (; num_rsp; num_rsp--, info++) {
4340 u32 flags;
4341
4342 bacpy(&data.bdaddr, &info->bdaddr);
4343 data.pscan_rep_mode = info->pscan_rep_mode;
4344 data.pscan_period_mode = info->pscan_period_mode;
4345 data.pscan_mode = 0x00;
4346 memcpy(data.dev_class, info->dev_class, 3);
4347 data.clock_offset = info->clock_offset;
4348 data.rssi = info->rssi;
4349 data.ssp_mode = 0x00;
4350
4351 flags = hci_inquiry_cache_update(hdev, &data, false);
4352
4353 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4354 info->dev_class, info->rssi,
4355 flags, NULL, 0, NULL, 0);
4356 }
4357 }
4358
4359 unlock:
4360 hci_dev_unlock(hdev);
4361 }
4362
4363 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4364 struct sk_buff *skb)
4365 {
4366 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4367 struct hci_conn *conn;
4368
4369 BT_DBG("%s", hdev->name);
4370
4371 hci_dev_lock(hdev);
4372
4373 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4374 if (!conn)
4375 goto unlock;
4376
4377 if (ev->page < HCI_MAX_PAGES)
4378 memcpy(conn->features[ev->page], ev->features, 8);
4379
4380 if (!ev->status && ev->page == 0x01) {
4381 struct inquiry_entry *ie;
4382
4383 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4384 if (ie)
4385 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4386
4387 if (ev->features[0] & LMP_HOST_SSP) {
4388 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4389 } else {
4390 /* The Bluetooth specification mandates that
4391 * Extended Inquiry Results are only used when Secure
4392 * Simple Pairing is enabled, but some devices violate
4393 * this.
4394 *
4395 * To make these devices work, the internal SSP
4396 * enabled flag needs to be cleared if the remote host
4397 * features do not indicate SSP support */
4398 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4399 }
4400
4401 if (ev->features[0] & LMP_HOST_SC)
4402 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4403 }
4404
4405 if (conn->state != BT_CONFIG)
4406 goto unlock;
4407
4408 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4409 struct hci_cp_remote_name_req cp;
4410 memset(&cp, 0, sizeof(cp));
4411 bacpy(&cp.bdaddr, &conn->dst);
4412 cp.pscan_rep_mode = 0x02;
4413 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4414 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4415 mgmt_device_connected(hdev, conn, NULL, 0);
4416
4417 if (!hci_outgoing_auth_needed(hdev, conn)) {
4418 conn->state = BT_CONNECTED;
4419 hci_connect_cfm(conn, ev->status);
4420 hci_conn_drop(conn);
4421 }
4422
4423 unlock:
4424 hci_dev_unlock(hdev);
4425 }
4426
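/* Synchronous Connection Complete handling: the link type is first
 * validated (the spec only allows SCO and eSCO here), duplicate
 * complete events for an already connected handle are rejected, and
 * a set of eSCO parameter negotiation failures triggers a retry for
 * outgoing connections with a reduced packet type mask before the
 * connection is finally given up on.
 */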
4427 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4428 struct sk_buff *skb)
4429 {
4430 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4431 struct hci_conn *conn;
4432
4433 switch (ev->link_type) {
4434 case SCO_LINK:
4435 case ESCO_LINK:
4436 break;
4437 default:
4438 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4439 * for HCI_Synchronous_Connection_Complete is limited to
4440 * either SCO or eSCO
4441 */
4442 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4443 return;
4444 }
4445
4446 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4447
4448 hci_dev_lock(hdev);
4449
4450 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4451 if (!conn) {
4452 if (ev->link_type == ESCO_LINK)
4453 goto unlock;
4454
4455 /* When the link type in the event indicates SCO connection
4456 * and lookup of the connection object fails, then check
4457 * if an eSCO connection object exists.
4458 *
4459 * The core limits the synchronous connections to either
4460 * SCO or eSCO. The eSCO connection is preferred and is
4461 * attempted first; until it is successfully established,
4462 * the link type will be hinted as eSCO.
4463 */
4464 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4465 if (!conn)
4466 goto unlock;
4467 }
4468
4469 switch (ev->status) {
4470 case 0x00:
4471 /* The synchronous connection complete event should only be
4472 * sent once per new connection. Receiving a successful
4473 * complete event when the connection status is already
4474 * BT_CONNECTED means that the device is misbehaving and sent
4475 * multiple complete event packets for the same new connection.
4476 *
4477 * Registering the device more than once can corrupt kernel
4478 * memory, hence upon detecting this invalid event, we report
4479 * an error and ignore the packet.
4480 */
4481 if (conn->state == BT_CONNECTED) {
4482 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4483 goto unlock;
4484 }
4485
4486 conn->handle = __le16_to_cpu(ev->handle);
4487 conn->state = BT_CONNECTED;
4488 conn->type = ev->link_type;
4489
4490 hci_debugfs_create_conn(conn);
4491 hci_conn_add_sysfs(conn);
4492 break;
4493
4494 case 0x10: /* Connection Accept Timeout */
4495 case 0x0d: /* Connection Rejected due to Limited Resources */
4496 case 0x11: /* Unsupported Feature or Parameter Value */
4497 case 0x1c: /* SCO interval rejected */
4498 case 0x1a: /* Unsupported Remote Feature */
4499 case 0x1e: /* Invalid LMP Parameters */
4500 case 0x1f: /* Unspecified error */
4501 case 0x20: /* Unsupported LMP Parameter value */
4502 if (conn->out) {
4503 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4504 (hdev->esco_type & EDR_ESCO_MASK);
4505 if (hci_setup_sync(conn, conn->link->handle))
4506 goto unlock;
4507 }
4508 fallthrough;
4509
4510 default:
4511 conn->state = BT_CLOSED;
4512 break;
4513 }
4514
4515 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4516
4517 switch (ev->air_mode) {
4518 case 0x02:
4519 if (hdev->notify)
4520 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4521 break;
4522 case 0x03:
4523 if (hdev->notify)
4524 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4525 break;
4526 }
4527
4528 hci_connect_cfm(conn, ev->status);
4529 if (ev->status)
4530 hci_conn_del(conn);
4531
4532 unlock:
4533 hci_dev_unlock(hdev);
4534 }
4535
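/* EIR data is a sequence of length-prefixed fields: one length octet
 * followed by 'length' octets (type + payload), terminated by a zero
 * length octet or the end of the buffer. As a worked example with
 * made-up bytes, the blob
 *
 *   02 01 06  05 09 'B' 'l' 'u' 'e'  00 00 ...
 *
 * contains a 3-octet Flags field and a 6-octet Complete Local Name
 * field ("Blue"), so eir_get_length() returns 9 and the trailing
 * zero padding is not counted.
 */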
4536 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4537 {
4538 size_t parsed = 0;
4539
4540 while (parsed < eir_len) {
4541 u8 field_len = eir[0];
4542
4543 if (field_len == 0)
4544 return parsed;
4545
4546 parsed += field_len + 1;
4547 eir += field_len + 1;
4548 }
4549
4550 return eir_len;
4551 }
4552
4553 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4554 struct sk_buff *skb)
4555 {
4556 struct inquiry_data data;
4557 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4558 int num_rsp = *((__u8 *) skb->data);
4559 size_t eir_len;
4560
4561 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4562
4563 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4564 return;
4565
4566 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4567 return;
4568
4569 hci_dev_lock(hdev);
4570
4571 for (; num_rsp; num_rsp--, info++) {
4572 u32 flags;
4573 bool name_known;
4574
4575 bacpy(&data.bdaddr, &info->bdaddr);
4576 data.pscan_rep_mode = info->pscan_rep_mode;
4577 data.pscan_period_mode = info->pscan_period_mode;
4578 data.pscan_mode = 0x00;
4579 memcpy(data.dev_class, info->dev_class, 3);
4580 data.clock_offset = info->clock_offset;
4581 data.rssi = info->rssi;
4582 data.ssp_mode = 0x01;
4583
4584 if (hci_dev_test_flag(hdev, HCI_MGMT))
4585 name_known = eir_get_data(info->data,
4586 sizeof(info->data),
4587 EIR_NAME_COMPLETE, NULL);
4588 else
4589 name_known = true;
4590
4591 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4592
4593 eir_len = eir_get_length(info->data, sizeof(info->data));
4594
4595 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4596 info->dev_class, info->rssi,
4597 flags, info->data, eir_len, NULL, 0);
4598 }
4599
4600 hci_dev_unlock(hdev);
4601 }
4602
4603 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4604 struct sk_buff *skb)
4605 {
4606 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4607 struct hci_conn *conn;
4608
4609 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4610 __le16_to_cpu(ev->handle));
4611
4612 hci_dev_lock(hdev);
4613
4614 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4615 if (!conn)
4616 goto unlock;
4617
4618 /* For BR/EDR the necessary steps are taken through the
4619 * auth_complete event.
4620 */
4621 if (conn->type != LE_LINK)
4622 goto unlock;
4623
4624 if (!ev->status)
4625 conn->sec_level = conn->pending_sec_level;
4626
4627 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4628
4629 if (ev->status && conn->state == BT_CONNECTED) {
4630 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4631 hci_conn_drop(conn);
4632 goto unlock;
4633 }
4634
4635 if (conn->state == BT_CONFIG) {
4636 if (!ev->status)
4637 conn->state = BT_CONNECTED;
4638
4639 hci_connect_cfm(conn, ev->status);
4640 hci_conn_drop(conn);
4641 } else {
4642 hci_auth_cfm(conn, ev->status);
4643
4644 hci_conn_hold(conn);
4645 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4646 hci_conn_drop(conn);
4647 }
4648
4649 unlock:
4650 hci_dev_unlock(hdev);
4651 }
4652
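/* Authentication requirements are encoded so that bit 0 is the MITM
 * protection flag (e.g. HCI_AT_NO_BONDING vs HCI_AT_NO_BONDING_MITM).
 * The helper below merges the remote requirement with our own: it
 * honours a remote no-bonding request, demands MITM when both sides
 * have usable IO capabilities, and otherwise strips the MITM bit
 * since no protection is achievable anyway.
 */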
4653 static u8 hci_get_auth_req(struct hci_conn *conn)
4654 {
4655 /* If the remote requests no-bonding, follow that lead */
4656 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4657 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4658 return conn->remote_auth | (conn->auth_type & 0x01);
4659
4660 /* If both remote and local have enough IO capabilities, require
4661 * MITM protection
4662 */
4663 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4664 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4665 return conn->remote_auth | 0x01;
4666
4667 /* No MITM protection possible so ignore remote requirement */
4668 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4669 }
4670
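/* Compute the OOB Data Present value for an IO Capability Reply.
 * Following the Core spec encoding, 0x00 means no OOB data, 0x01
 * means P-192 data is available and 0x02 means P-256 data is
 * available. Stored-but-zeroed hash/randomizer values are treated
 * as absent.
 */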
4671 static u8 bredr_oob_data_present(struct hci_conn *conn)
4672 {
4673 struct hci_dev *hdev = conn->hdev;
4674 struct oob_data *data;
4675
4676 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4677 if (!data)
4678 return 0x00;
4679
4680 if (bredr_sc_enabled(hdev)) {
4681 /* When Secure Connections is enabled, then just
4682 * return the present value stored with the OOB
4683 * data. The stored value contains the right present
4684 * information. However it can only be trusted when
4685 * not in Secure Connections Only mode.
4686 */
4687 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4688 return data->present;
4689
4690 /* When Secure Connections Only mode is enabled, then
4691 * the P-256 values are required. If they are not
4692 * available, then do not declare that OOB data is
4693 * present.
4694 */
4695 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4696 !crypto_memneq(data->hash256, ZERO_KEY, 16))
4697 return 0x00;
4698
4699 return 0x02;
4700 }
4701
4702 /* When Secure Connections is not enabled or actually
4703 * not supported by the hardware, then check if the
4704 * P-192 data values are present.
4705 */
4706 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4707 !crypto_memneq(data->hash192, ZERO_KEY, 16))
4708 return 0x00;
4709
4710 return 0x01;
4711 }
4712
4713 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4714 {
4715 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4716 struct hci_conn *conn;
4717
4718 BT_DBG("%s", hdev->name);
4719
4720 hci_dev_lock(hdev);
4721
4722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4723 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4724 goto unlock;
4725
4726 /* Assume remote supports SSP since it has triggered this event */
4727 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4728
4729 hci_conn_hold(conn);
4730
4731 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4732 goto unlock;
4733
4734 /* Allow pairing if we're pairable, the initiator of the
4735 * pairing, or if the remote is not requesting bonding.
4736 */
4737 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4738 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4739 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4740 struct hci_cp_io_capability_reply cp;
4741
4742 bacpy(&cp.bdaddr, &ev->bdaddr);
4743 /* Change the IO capability from KeyboardDisplay
4744 * to DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
4745 cp.capability = (conn->io_capability == 0x04) ?
4746 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4747
4748 /* If we are initiators, there is no remote information yet */
4749 if (conn->remote_auth == 0xff) {
4750 /* Request MITM protection if our IO caps allow it
4751 * except for the no-bonding case.
4752 */
4753 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4754 conn->auth_type != HCI_AT_NO_BONDING)
4755 conn->auth_type |= 0x01;
4756 } else {
4757 conn->auth_type = hci_get_auth_req(conn);
4758 }
4759
4760 /* If we're not bondable, force one of the non-bondable
4761 * authentication requirement values.
4762 */
4763 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4764 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4765
4766 cp.authentication = conn->auth_type;
4767 cp.oob_data = bredr_oob_data_present(conn);
4768
4769 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4770 sizeof(cp), &cp);
4771 } else {
4772 struct hci_cp_io_capability_neg_reply cp;
4773
4774 bacpy(&cp.bdaddr, &ev->bdaddr);
4775 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4776
4777 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4778 sizeof(cp), &cp);
4779 }
4780
4781 unlock:
4782 hci_dev_unlock(hdev);
4783 }
4784
4785 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4786 {
4787 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4788 struct hci_conn *conn;
4789
4790 BT_DBG("%s", hdev->name);
4791
4792 hci_dev_lock(hdev);
4793
4794 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4795 if (!conn)
4796 goto unlock;
4797
4798 conn->remote_cap = ev->capability;
4799 conn->remote_auth = ev->authentication;
4800
4801 unlock:
4802 hci_dev_unlock(hdev);
4803 }
4804
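/* User Confirmation requests implement the numeric comparison and
 * just-works association models. When neither side requires MITM
 * protection the request can be auto-accepted, optionally after a
 * configurable delay; confirm_hint is set to 1 when user space is
 * asked for plain authorization instead of an actual numeric
 * comparison.
 */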
4805 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4806 struct sk_buff *skb)
4807 {
4808 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4809 int loc_mitm, rem_mitm, confirm_hint = 0;
4810 struct hci_conn *conn;
4811
4812 BT_DBG("%s", hdev->name);
4813
4814 hci_dev_lock(hdev);
4815
4816 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4817 goto unlock;
4818
4819 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4820 if (!conn)
4821 goto unlock;
4822
4823 loc_mitm = (conn->auth_type & 0x01);
4824 rem_mitm = (conn->remote_auth & 0x01);
4825
4826 /* If we require MITM but the remote device can't provide that
4827 * (it has NoInputNoOutput) then reject the confirmation
4828 * request. We check the security level here since it doesn't
4829 * necessarily match conn->auth_type.
4830 */
4831 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4832 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4833 BT_DBG("Rejecting request: remote device can't provide MITM");
4834 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4835 sizeof(ev->bdaddr), &ev->bdaddr);
4836 goto unlock;
4837 }
4838
4839 /* If no side requires MITM protection; auto-accept */
4840 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4841 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4842
4843 /* If we're not the initiator, request authorization to
4844 * proceed from user space (mgmt_user_confirm with
4845 * confirm_hint set to 1). The exception is if neither
4846 * side had MITM or if the local IO capability is
4847 * NoInputNoOutput, in which case we auto-accept.
4848 */
4849 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4850 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4851 (loc_mitm || rem_mitm)) {
4852 BT_DBG("Confirming auto-accept as acceptor");
4853 confirm_hint = 1;
4854 goto confirm;
4855 }
4856
4857 /* If a link key already exists on the local host, leave the
4858 * decision to user space since the remote device could be
4859 * legitimate or malicious.
4860 */
4861 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4862 bt_dev_dbg(hdev, "Local host already has link key");
4863 confirm_hint = 1;
4864 goto confirm;
4865 }
4866
4867 BT_DBG("Auto-accept of user confirmation with %ums delay",
4868 hdev->auto_accept_delay);
4869
4870 if (hdev->auto_accept_delay > 0) {
4871 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4872 queue_delayed_work(conn->hdev->workqueue,
4873 &conn->auto_accept_work, delay);
4874 goto unlock;
4875 }
4876
4877 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4878 sizeof(ev->bdaddr), &ev->bdaddr);
4879 goto unlock;
4880 }
4881
4882 confirm:
4883 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4884 le32_to_cpu(ev->passkey), confirm_hint);
4885
4886 unlock:
4887 hci_dev_unlock(hdev);
4888 }
4889
4890 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4891 struct sk_buff *skb)
4892 {
4893 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4894
4895 BT_DBG("%s", hdev->name);
4896
4897 if (hci_dev_test_flag(hdev, HCI_MGMT))
4898 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4899 }
4900
4901 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4902 struct sk_buff *skb)
4903 {
4904 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4905 struct hci_conn *conn;
4906
4907 BT_DBG("%s", hdev->name);
4908
4909 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4910 if (!conn)
4911 return;
4912
4913 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4914 conn->passkey_entered = 0;
4915
4916 if (hci_dev_test_flag(hdev, HCI_MGMT))
4917 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4918 conn->dst_type, conn->passkey_notify,
4919 conn->passkey_entered);
4920 }
4921
4922 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4923 {
4924 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4925 struct hci_conn *conn;
4926
4927 BT_DBG("%s", hdev->name);
4928
4929 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4930 if (!conn)
4931 return;
4932
4933 switch (ev->type) {
4934 case HCI_KEYPRESS_STARTED:
4935 conn->passkey_entered = 0;
4936 return;
4937
4938 case HCI_KEYPRESS_ENTERED:
4939 conn->passkey_entered++;
4940 break;
4941
4942 case HCI_KEYPRESS_ERASED:
4943 conn->passkey_entered--;
4944 break;
4945
4946 case HCI_KEYPRESS_CLEARED:
4947 conn->passkey_entered = 0;
4948 break;
4949
4950 case HCI_KEYPRESS_COMPLETED:
4951 return;
4952 }
4953
4954 if (hci_dev_test_flag(hdev, HCI_MGMT))
4955 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4956 conn->dst_type, conn->passkey_notify,
4957 conn->passkey_entered);
4958 }
4959
4960 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4961 struct sk_buff *skb)
4962 {
4963 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4964 struct hci_conn *conn;
4965
4966 BT_DBG("%s", hdev->name);
4967
4968 hci_dev_lock(hdev);
4969
4970 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4971 if (!conn || !hci_conn_ssp_enabled(conn))
4972 goto unlock;
4973
4974 /* Reset the authentication requirement to unknown */
4975 conn->remote_auth = 0xff;
4976
4977 /* To avoid duplicate auth_failed events to user space we check
4978 * the HCI_CONN_AUTH_PEND flag which will be set if we
4979 * initiated the authentication. A traditional auth_complete
4980 * event is always produced for the initiator and is also mapped to
4981 * the mgmt_auth_failed event */
4982 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4983 mgmt_auth_failed(conn, ev->status);
4984
4985 hci_conn_drop(conn);
4986
4987 unlock:
4988 hci_dev_unlock(hdev);
4989 }
4990
4991 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4992 struct sk_buff *skb)
4993 {
4994 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4995 struct inquiry_entry *ie;
4996 struct hci_conn *conn;
4997
4998 BT_DBG("%s", hdev->name);
4999
5000 hci_dev_lock(hdev);
5001
5002 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5003 if (conn)
5004 memcpy(conn->features[1], ev->features, 8);
5005
5006 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5007 if (ie)
5008 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5009
5010 hci_dev_unlock(hdev);
5011 }
5012
5013 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5014 struct sk_buff *skb)
5015 {
5016 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5017 struct oob_data *data;
5018
5019 BT_DBG("%s", hdev->name);
5020
5021 hci_dev_lock(hdev);
5022
5023 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5024 goto unlock;
5025
5026 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5027 if (!data) {
5028 struct hci_cp_remote_oob_data_neg_reply cp;
5029
5030 bacpy(&cp.bdaddr, &ev->bdaddr);
5031 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5032 sizeof(cp), &cp);
5033 goto unlock;
5034 }
5035
5036 if (bredr_sc_enabled(hdev)) {
5037 struct hci_cp_remote_oob_ext_data_reply cp;
5038
5039 bacpy(&cp.bdaddr, &ev->bdaddr);
5040 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5041 memset(cp.hash192, 0, sizeof(cp.hash192));
5042 memset(cp.rand192, 0, sizeof(cp.rand192));
5043 } else {
5044 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5045 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5046 }
5047 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5048 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5049
5050 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5051 sizeof(cp), &cp);
5052 } else {
5053 struct hci_cp_remote_oob_data_reply cp;
5054
5055 bacpy(&cp.bdaddr, &ev->bdaddr);
5056 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5057 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5058
5059 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5060 sizeof(cp), &cp);
5061 }
5062
5063 unlock:
5064 hci_dev_unlock(hdev);
5065 }
5066
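/* The handlers below deal with AMP (Bluetooth High Speed) physical
 * and logical links and are only built when CONFIG_BT_HS is enabled.
 */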
5067 #if IS_ENABLED(CONFIG_BT_HS)
5068 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5069 {
5070 struct hci_ev_channel_selected *ev = (void *)skb->data;
5071 struct hci_conn *hcon;
5072
5073 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5074
5075 skb_pull(skb, sizeof(*ev));
5076
5077 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5078 if (!hcon)
5079 return;
5080
5081 amp_read_loc_assoc_final_data(hdev, hcon);
5082 }
5083
5084 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5085 struct sk_buff *skb)
5086 {
5087 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5088 struct hci_conn *hcon, *bredr_hcon;
5089
5090 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5091 ev->status);
5092
5093 hci_dev_lock(hdev);
5094
5095 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5096 if (!hcon)
5097 goto unlock;
5098
5099 if (!hcon->amp_mgr)
5100 goto unlock;
5101
5102 if (ev->status) {
5103 hci_conn_del(hcon);
5104 goto unlock;
5105 }
5106
5107 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5108
5109 hcon->state = BT_CONNECTED;
5110 bacpy(&hcon->dst, &bredr_hcon->dst);
5111
5112 hci_conn_hold(hcon);
5113 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5114 hci_conn_drop(hcon);
5115
5116 hci_debugfs_create_conn(hcon);
5117 hci_conn_add_sysfs(hcon);
5118
5119 amp_physical_cfm(bredr_hcon, hcon);
5120
5121 unlock:
5122 hci_dev_unlock(hdev);
5123 }
5124
5125 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5126 {
5127 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5128 struct hci_conn *hcon;
5129 struct hci_chan *hchan;
5130 struct amp_mgr *mgr;
5131
5132 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5133 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5134 ev->status);
5135
5136 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5137 if (!hcon)
5138 return;
5139
5140 /* Create AMP hchan */
5141 hchan = hci_chan_create(hcon);
5142 if (!hchan)
5143 return;
5144
5145 hchan->handle = le16_to_cpu(ev->handle);
5146 hchan->amp = true;
5147
5148 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5149
5150 mgr = hcon->amp_mgr;
5151 if (mgr && mgr->bredr_chan) {
5152 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5153
5154 l2cap_chan_lock(bredr_chan);
5155
5156 bredr_chan->conn->mtu = hdev->block_mtu;
5157 l2cap_logical_cfm(bredr_chan, hchan, 0);
5158 hci_conn_hold(hcon);
5159
5160 l2cap_chan_unlock(bredr_chan);
5161 }
5162 }
5163
5164 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5165 struct sk_buff *skb)
5166 {
5167 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5168 struct hci_chan *hchan;
5169
5170 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5171 le16_to_cpu(ev->handle), ev->status);
5172
5173 if (ev->status)
5174 return;
5175
5176 hci_dev_lock(hdev);
5177
5178 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5179 if (!hchan || !hchan->amp)
5180 goto unlock;
5181
5182 amp_destroy_logical_link(hchan, ev->reason);
5183
5184 unlock:
5185 hci_dev_unlock(hdev);
5186 }
5187
5188 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5189 struct sk_buff *skb)
5190 {
5191 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5192 struct hci_conn *hcon;
5193
5194 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5195
5196 if (ev->status)
5197 return;
5198
5199 hci_dev_lock(hdev);
5200
5201 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5202 if (hcon && hcon->type == AMP_LINK) {
5203 hcon->state = BT_CLOSED;
5204 hci_disconn_cfm(hcon, ev->reason);
5205 hci_conn_del(hcon);
5206 }
5207
5208 hci_dev_unlock(hdev);
5209 }
5210 #endif
5211
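/* Track which address each side used while establishing the
 * connection. For outgoing connections the peer is the responder,
 * for incoming ones it is the initiator. Our own address is taken,
 * in order of preference, from the Local RPA reported by the
 * controller, from hdev->rpa when host-based privacy is in use, or
 * from the identity/advertising address otherwise.
 */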
5212 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5213 u8 bdaddr_type, bdaddr_t *local_rpa)
5214 {
5215 if (conn->out) {
5216 conn->dst_type = bdaddr_type;
5217 conn->resp_addr_type = bdaddr_type;
5218 bacpy(&conn->resp_addr, bdaddr);
5219
5220 /* If the controller has set a Local RPA, it must be
5221 * used instead of hdev->rpa.
5222 */
5223 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5224 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5225 bacpy(&conn->init_addr, local_rpa);
5226 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5227 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5228 bacpy(&conn->init_addr, &conn->hdev->rpa);
5229 } else {
5230 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5231 &conn->init_addr_type);
5232 }
5233 } else {
5234 conn->resp_addr_type = conn->hdev->adv_addr_type;
5235 /* If the controller has set a Local RPA, it must be
5236 * used instead of hdev->rpa.
5237 */
5238 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5239 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5240 bacpy(&conn->resp_addr, local_rpa);
5241 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5242 /* In case of ext adv, resp_addr will be updated in
5243 * Adv Terminated event.
5244 */
5245 if (!ext_adv_capable(conn->hdev))
5246 bacpy(&conn->resp_addr,
5247 &conn->hdev->random_addr);
5248 } else {
5249 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5250 }
5251
5252 conn->init_addr_type = bdaddr_type;
5253 bacpy(&conn->init_addr, bdaddr);
5254
5255 /* For incoming connections, set the default minimum
5256 * and maximum connection interval. They will be used
5257 * to check if the parameters are in range and if not
5258 * trigger the connection update procedure.
5259 */
5260 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5261 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5262 }
5263 }
5264
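/* Common handling for the legacy and enhanced LE Connection Complete
 * events. The flow is roughly: clear the advertising flag, look up or
 * create the hci_conn (accept-list initiated connections have no
 * pre-existing object), fix up the connection addresses, resolve the
 * peer back to its identity address via the IRK store and, on
 * success, either start the remote features exchange or go straight
 * to the connected state depending on role and controller features.
 */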
5265 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5266 bdaddr_t *bdaddr, u8 bdaddr_type,
5267 bdaddr_t *local_rpa, u8 role, u16 handle,
5268 u16 interval, u16 latency,
5269 u16 supervision_timeout)
5270 {
5271 struct hci_conn_params *params;
5272 struct hci_conn *conn;
5273 struct smp_irk *irk;
5274 u8 addr_type;
5275
5276 hci_dev_lock(hdev);
5277
5278 /* All controllers implicitly stop advertising in the event of a
5279 * connection, so ensure that the state bit is cleared.
5280 */
5281 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5282
5283 conn = hci_lookup_le_connect(hdev);
5284 if (!conn) {
5285 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5286 if (!conn) {
5287 bt_dev_err(hdev, "no memory for new connection");
5288 goto unlock;
5289 }
5290
5291 conn->dst_type = bdaddr_type;
5292
5293 /* If we didn't have a hci_conn object previously
5294 * but we're in central role this must be something
5295 * initiated using an accept list. Since accept list based
5296 * connections are not "first class citizens" we don't
5297 * have full tracking of them. Therefore, we go ahead
5298 * with a "best effort" approach of determining the
5299 * initiator address based on the HCI_PRIVACY flag.
5300 */
5301 if (conn->out) {
5302 conn->resp_addr_type = bdaddr_type;
5303 bacpy(&conn->resp_addr, bdaddr);
5304 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5305 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5306 bacpy(&conn->init_addr, &hdev->rpa);
5307 } else {
5308 hci_copy_identity_address(hdev,
5309 &conn->init_addr,
5310 &conn->init_addr_type);
5311 }
5312 }
5313 } else {
5314 cancel_delayed_work(&conn->le_conn_timeout);
5315 }
5316
5317 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5318
5319 /* Lookup the identity address from the stored connection
5320 * address and address type.
5321 *
5322 * When establishing connections to an identity address, the
5323 * connection procedure will store the resolvable random
5324 * address first. Now if it can be converted back into the
5325 * identity address, start using the identity address from
5326 * now on.
5327 */
5328 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5329 if (irk) {
5330 bacpy(&conn->dst, &irk->bdaddr);
5331 conn->dst_type = irk->addr_type;
5332 }
5333
5334 /* When using controller based address resolution, the new
5335 * address types 0x02 and 0x03 are used. These types need to be
5336 * converted back into either public address or random address type
5337 */
5338 if (use_ll_privacy(hdev) &&
5339 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5340 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5341 switch (conn->dst_type) {
5342 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5343 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5344 break;
5345 case ADDR_LE_DEV_RANDOM_RESOLVED:
5346 conn->dst_type = ADDR_LE_DEV_RANDOM;
5347 break;
5348 }
5349 }
5350
5351 if (status) {
5352 hci_le_conn_failed(conn, status);
5353 goto unlock;
5354 }
5355
5356 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5357 addr_type = BDADDR_LE_PUBLIC;
5358 else
5359 addr_type = BDADDR_LE_RANDOM;
5360
5361 /* Drop the connection if the device is blocked */
5362 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5363 hci_conn_drop(conn);
5364 goto unlock;
5365 }
5366
5367 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5368 mgmt_device_connected(hdev, conn, NULL, 0);
5369
5370 conn->sec_level = BT_SECURITY_LOW;
5371 conn->handle = handle;
5372 conn->state = BT_CONFIG;
5373
5374 /* Store current advertising instance as connection advertising instance
5375 * when software rotation is in use so it can be re-enabled when
5376 * disconnected.
5377 */
5378 if (!ext_adv_capable(hdev))
5379 conn->adv_instance = hdev->cur_adv_instance;
5380
5381 conn->le_conn_interval = interval;
5382 conn->le_conn_latency = latency;
5383 conn->le_supv_timeout = supervision_timeout;
5384
5385 hci_debugfs_create_conn(conn);
5386 hci_conn_add_sysfs(conn);
5387
5388 /* The remote features procedure is defined for central
5389 * role only, so the remote features are only requested
5390 * for connections we initiated.
5391 *
5392 * If the local controller supports peripheral-initiated features
5393 * exchange, then requesting the remote features in peripheral
5394 * role is possible. Otherwise just transition into the
5395 * connected state without requesting the remote features.
5396 */
5397 if (conn->out ||
5398 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5399 struct hci_cp_le_read_remote_features cp;
5400
5401 cp.handle = __cpu_to_le16(conn->handle);
5402
5403 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5404 sizeof(cp), &cp);
5405
5406 hci_conn_hold(conn);
5407 } else {
5408 conn->state = BT_CONNECTED;
5409 hci_connect_cfm(conn, status);
5410 }
5411
5412 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5413 conn->dst_type);
5414 if (params) {
5415 list_del_init(&params->action);
5416 if (params->conn) {
5417 hci_conn_drop(params->conn);
5418 hci_conn_put(params->conn);
5419 params->conn = NULL;
5420 }
5421 }
5422
5423 unlock:
5424 hci_update_background_scan(hdev);
5425 hci_dev_unlock(hdev);
5426 }
5427
5428 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5429 {
5430 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5431
5432 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5433
5434 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5435 NULL, ev->role, le16_to_cpu(ev->handle),
5436 le16_to_cpu(ev->interval),
5437 le16_to_cpu(ev->latency),
5438 le16_to_cpu(ev->supervision_timeout));
5439 }
5440
5441 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5442 struct sk_buff *skb)
5443 {
5444 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5445
5446 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5447
5448 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5449 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5450 le16_to_cpu(ev->interval),
5451 le16_to_cpu(ev->latency),
5452 le16_to_cpu(ev->supervision_timeout));
5453
5454 if (use_ll_privacy(hdev) &&
5455 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5456 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5457 hci_req_disable_address_resolution(hdev);
5458 }
5459
5460 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5461 {
5462 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5463 struct hci_conn *conn;
5464 struct adv_info *adv;
5465
5466 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5467
5468 adv = hci_find_adv_instance(hdev, ev->handle);
5469
5470 if (ev->status) {
5471 if (!adv)
5472 return;
5473
5474 /* Remove advertising as it has been terminated */
5475 hci_remove_adv_instance(hdev, ev->handle);
5476 mgmt_advertising_removed(NULL, hdev, ev->handle);
5477
5478 return;
5479 }
5480
5481 if (adv)
5482 adv->enabled = false;
5483
5484 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5485 if (conn) {
5486 /* Store handle in the connection so the correct advertising
5487 * instance can be re-enabled when disconnected.
5488 */
5489 conn->adv_instance = ev->handle;
5490
5491 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5492 bacmp(&conn->resp_addr, BDADDR_ANY))
5493 return;
5494
5495 if (!ev->handle) {
5496 bacpy(&conn->resp_addr, &hdev->random_addr);
5497 return;
5498 }
5499
5500 if (adv)
5501 bacpy(&conn->resp_addr, &adv->random_addr);
5502 }
5503 }
5504
5505 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5506 struct sk_buff *skb)
5507 {
5508 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5509 struct hci_conn *conn;
5510
5511 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5512
5513 if (ev->status)
5514 return;
5515
5516 hci_dev_lock(hdev);
5517
5518 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5519 if (conn) {
5520 conn->le_conn_interval = le16_to_cpu(ev->interval);
5521 conn->le_conn_latency = le16_to_cpu(ev->latency);
5522 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5523 }
5524
5525 hci_dev_unlock(hdev);
5526 }
5527
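/* Decide whether an advertising report should trigger a connection
 * attempt. Only connectable PDUs qualify and blocked devices are
 * ignored. A pending connection parameter entry must exist; unless
 * it is an explicit connect, the auto-connect policy must also
 * match: HCI_AUTO_CONN_DIRECT only reacts to ADV_DIRECT_IND while
 * HCI_AUTO_CONN_ALWAYS also connects on ADV_IND.
 */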
5528 /* This function requires the caller holds hdev->lock */
5529 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5530 bdaddr_t *addr,
5531 u8 addr_type, u8 adv_type,
5532 bdaddr_t *direct_rpa)
5533 {
5534 struct hci_conn *conn;
5535 struct hci_conn_params *params;
5536
5537 /* If the event is not connectable don't proceed further */
5538 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5539 return NULL;
5540
5541 /* Ignore if the device is blocked */
5542 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5543 return NULL;
5544
5545 /* Most controllers will fail if we try to create new connections
5546 * while we have an existing one in peripheral role.
5547 */
5548 if (hdev->conn_hash.le_num_peripheral > 0 &&
5549 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5550 !(hdev->le_states[3] & 0x10)))
5551 return NULL;
5552
5553 /* If we're not connectable only connect devices that we have in
5554 * our pend_le_conns list.
5555 */
5556 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5557 addr_type);
5558 if (!params)
5559 return NULL;
5560
5561 if (!params->explicit_connect) {
5562 switch (params->auto_connect) {
5563 case HCI_AUTO_CONN_DIRECT:
5564 /* Only devices advertising with ADV_DIRECT_IND are
5565 * triggering a connection attempt. This allows
5566 * incoming connections from peripheral devices.
5567 */
5568 if (adv_type != LE_ADV_DIRECT_IND)
5569 return NULL;
5570 break;
5571 case HCI_AUTO_CONN_ALWAYS:
5572 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5573 * are triggering a connection attempt. This means
5574 * that incoming connections from peripheral devices are
5575 * accepted and also outgoing connections to peripheral
5576 * devices are established when found.
5577 */
5578 break;
5579 default:
5580 return NULL;
5581 }
5582 }
5583
5584 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5585 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5586 direct_rpa);
5587 if (!IS_ERR(conn)) {
5588 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5589 * by the higher layer that tried to connect; if not,
5590 * store the pointer since we don't really have any
5591 * other owner of the object besides the params that
5592 * triggered it. This way we can abort the connection if
5593 * the parameters get removed and keep the reference
5594 * count consistent once the connection is established.
5595 */
5596
5597 if (!params->explicit_connect)
5598 params->conn = hci_conn_get(conn);
5599
5600 return conn;
5601 }
5602
5603 switch (PTR_ERR(conn)) {
5604 case -EBUSY:
5605 /* If hci_connect() returns -EBUSY it means there is already
5606 * an LE connection attempt going on. Since controllers don't
5607 * support more than one connection attempt at a time, we
5608 * don't consider this an error case.
5609 */
5610 break;
5611 default:
5612 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5613 return NULL;
5614 }
5615
5616 return NULL;
5617 }
5618
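/* Central dispatcher for LE advertising reports. After validating
 * the PDU type and trimming padded zero bytes off the data, it
 * filters directed reports by our own RPA, resolves the advertiser
 * to its identity address, checks whether a connection is pending
 * and finally feeds mgmt: ADV_IND/ADV_SCAN_IND reports may be cached
 * so that a subsequent SCAN_RSP can be merged into a single device
 * found event.
 */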
5619 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5620 u8 bdaddr_type, bdaddr_t *direct_addr,
5621 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5622 bool ext_adv)
5623 {
5624 struct discovery_state *d = &hdev->discovery;
5625 struct smp_irk *irk;
5626 struct hci_conn *conn;
5627 bool match;
5628 u32 flags;
5629 u8 *ptr;
5630
5631 switch (type) {
5632 case LE_ADV_IND:
5633 case LE_ADV_DIRECT_IND:
5634 case LE_ADV_SCAN_IND:
5635 case LE_ADV_NONCONN_IND:
5636 case LE_ADV_SCAN_RSP:
5637 break;
5638 default:
5639 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5640 "type: 0x%02x", type);
5641 return;
5642 }
5643
5644 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5645 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5646 return;
5647 }
5648
5649 /* Find the end of the data in case the report contains padded zero
5650 * bytes at the end causing an invalid length value.
5651 *
5652 * When data is NULL, len is 0 so there is no need for extra ptr
5653 * check as 'ptr < data + 0' is already false in such case.
5654 */
5655 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5656 if (ptr + 1 + *ptr > data + len)
5657 break;
5658 }
5659
5660 /* Adjust for actual length. This handles the case when the remote
5661 * device is advertising with an incorrect data length.
5662 */
5663 len = ptr - data;
5664
5665 /* If the direct address is present, then this report is from
5666 * a LE Direct Advertising Report event. In that case it is
5667 * important to see if the address is matching the local
5668 * controller address.
5669 */
5670 if (direct_addr) {
5671 /* Only resolvable random addresses are valid for these
5672 * kinds of reports; others can be ignored.
5673 */
5674 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5675 return;
5676
5677 /* If the controller is not using resolvable random
5678 * addresses, then this report can be ignored.
5679 */
5680 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5681 return;
5682
5683 /* If the local IRK of the controller does not match
5684 * with the resolvable random address provided, then
5685 * this report can be ignored.
5686 */
5687 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5688 return;
5689 }
5690
5691 /* Check if we need to convert to identity address */
5692 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5693 if (irk) {
5694 bdaddr = &irk->bdaddr;
5695 bdaddr_type = irk->addr_type;
5696 }
5697
5698 /* Check if we have been requested to connect to this device.
5699 *
5700 * direct_addr is set only for directed advertising reports (it is NULL
5701 * for regular advertising reports) and is already verified to be RPA above.
5702 */
5703 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5704 direct_addr);
5705 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5706 /* Store report for later inclusion by
5707 * mgmt_device_connected
5708 */
5709 memcpy(conn->le_adv_data, data, len);
5710 conn->le_adv_data_len = len;
5711 }
5712
5713 /* Passive scanning shouldn't trigger any device found events,
5714 * except for devices marked as CONN_REPORT for which we do send
5715 * device found events, or when advertisement monitoring was requested.
5716 */
5717 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5718 if (type == LE_ADV_DIRECT_IND)
5719 return;
5720
5721 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5722 bdaddr, bdaddr_type) &&
5723 idr_is_empty(&hdev->adv_monitors_idr))
5724 return;
5725
5726 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5727 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5728 else
5729 flags = 0;
5730 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5731 rssi, flags, data, len, NULL, 0);
5732 return;
5733 }
5734
5735 /* When receiving non-connectable or scannable undirected
5736 * advertising reports, this means that the remote device is
5737 * not connectable and then clearly indicate this in the
5738 * device found event.
5739 *
5740 * When receiving a scan response, then there is no way to
5741 * know if the remote device is connectable or not. However
5742 * since scan responses are merged with a previously seen
5743 * advertising report, the flags field from that report
5744 * will be used.
5745 *
5746 * In the really unlikely case that a controller get confused
5747 * and just sends a scan response event, then it is marked as
5748 * not connectable as well.
5749 */
5750 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5751 type == LE_ADV_SCAN_RSP)
5752 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5753 else
5754 flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

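/* LE Advertising Report event: after the num_reports byte, each report
 * carries evt_type (1), bdaddr_type (1), bdaddr (6) and length (1),
 * followed by length bytes of data and one trailing RSSI byte. That
 * trailing byte is why the loop below advances by sizeof(*ev) +
 * ev->length + 1.
 */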
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
			bt_dev_err(hdev, "Malicious advertising data.");
			break;
		}

		if (ev->length <= HCI_MAX_AD_LENGTH &&
		    ev->data + ev->length <= skb_tail_pointer(skb)) {
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

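/* The extended advertising report event type is a bit field: bit 0
 * connectable, bit 1 scannable, bit 2 directed, bit 3 scan response
 * and bit 4 legacy PDU. For legacy PDUs the whole field takes one of
 * a few fixed values, hence the exact-match switch below.
 */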
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

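/* Unlike the legacy report, an extended advertising report carries the
 * RSSI inside its fixed-size header rather than as a trailing byte, so
 * the loop below advances by sizeof(*ev) + ev->length only.
 */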
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Guard against truncated reports, mirroring the check in
		 * the legacy handler above.
		 */
		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev) ||
		    ev->data + ev->length > skb_tail_pointer(skb)) {
			bt_dev_err(hdev, "Malicious advertising data.");
			break;
		}

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

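/* The controller raises an LE Long Term Key Request when the remote
 * central starts encryption on the link. We either hand the matching
 * key back with LE_LTK_REPLY, or reject with LE_LTK_NEG_REPLY when no
 * suitable key is stored for this connection.
 */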
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

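/* Remote Connection Parameter Request: the peer asks for new connection
 * parameters (intervals in units of 1.25 ms, timeout in units of 10 ms).
 * We validate them, remember them for known devices and reply with
 * either an accept or a negative reply carrying the error reason.
 */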
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

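/* Direct advertising reports carry no AD payload, only the advertiser
 * address and the (resolvable) address the advertising was directed at,
 * hence data is NULL and len is 0 in the call below.
 */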
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

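/* Check whether @skb carries the event a request was waiting for:
 * either the arbitrary event @event, or a Command Complete matching
 * @opcode. Only then is the skb handed to the request's completion
 * callback; Command Status carries no parameters to pass on.
 */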
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

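/* Main entry point for incoming HCI event packets: record a possible
 * wake reason, dispatch the event to its handler and complete any
 * request that was waiting for it.
 */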
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;

		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}