1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
/* Version/revision of the Management interface implemented here;
 * reported to user space via MGMT_OP_READ_VERSION.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	18
44
/* Opcodes a trusted management socket may issue.  This full list is
 * returned by read_commands() for trusted sockets; untrusted sockets
 * are limited to mgmt_untrusted_commands instead.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126
/* Events delivered to trusted management sockets; reported by
 * read_commands() alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
169
/* Read-only subset of opcodes permitted for untrusted sockets
 * (sockets without HCI_SOCK_TRUSTED set).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
182
/* Events that are safe to deliver to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
199
/* Service cache lifetime (2 seconds, in jiffies); presumably used when
 * scheduling the service_cache delayed work — the scheduling site is
 * elsewhere in this file.
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* A 16-byte all-zero key constant, for comparison against loaded keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
204
205 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status()), so the
 * entry order must follow the HCI error code numbering exactly; codes
 * past the end of the table map to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
272
/* Translate an HCI status code into its MGMT counterpart.  Codes not
 * covered by mgmt_status_table fall back to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	return hci_status < ARRAY_SIZE(mgmt_status_table) ?
	       mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}
280
/* Send an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
287
/* Send an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
294
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
301
/* Convert an MGMT LE address type to the HCI address type.  Anything
 * other than BDADDR_LE_PUBLIC is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
309
/* Fill a mgmt_rp_read_version reply (passed as @ver) with the
 * interface version and little-endian revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
317
/* Handle MGMT_OP_READ_VERSION: reply with the interface version and
 * revision.  Index-less command, hence MGMT_INDEX_NONE.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
330
/* Handle MGMT_OP_READ_COMMANDS: reply with the list of supported
 * opcodes and events.  Trusted sockets get the full tables; untrusted
 * sockets get the restricted read-only tables.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the tables once instead of duplicating the fill loops
	 * for the trusted and untrusted cases.
	 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes and events are packed back to back after the header;
	 * the reply buffer may be unaligned, hence put_unaligned_le16().
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
382
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the ids of all configured
 * primary controllers.  Controllers in setup/config, bound to a user
 * channel, raw-only, or unconfigured are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes.  The extra filters below can
	 * only shrink the result, so the buffer sized above suffices.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the possibly smaller count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
442
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reports only primary controllers that still have HCI_UNCONFIGURED
 * set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count unconfigured primaries to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, applying the same exclusion
	 * rules as read_index_list().
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the possibly smaller count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
502
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and AMP
 * controllers, tagging each entry with a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and its bus.
 *
 * Calling this once also switches the socket over to extended index
 * events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count primary and AMP controllers. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock;
	 * struct_size() guards the flexible-array size computation.
	 */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
576
is_configured(struct hci_dev * hdev)577 static bool is_configured(struct hci_dev *hdev)
578 {
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
581 return false;
582
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
586 return false;
587
588 return true;
589 }
590
/* Return the set of configuration options (as a little-endian bitmask)
 * that are still missing before the controller counts as configured.
 * Mirrors the conditions checked by is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet performed. */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is required but none has been set yet. */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
606
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping @skip (the socket that triggered it).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
614
/* Complete @opcode on @sk with the currently missing configuration
 * options as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
622
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer plus
 * the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk says so. */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address requires a driver set_bdaddr hook. */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
650
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR radio. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M, and nested within it EDR 3M, with the slot
		 * variants gated on the same 3/5-slot EDR capabilities.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE radio. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
702
/* Build the bitmask of PHYs currently selected, from the BR/EDR packet
 * type mask and the LE default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* Note the inverted tests below: unlike the DM/DH bits
		 * above, a *cleared* HCI_2DHx/HCI_3DHx bit in pkt_type
		 * means the corresponding EDR packet type is in use.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
765
get_configurable_phys(struct hci_dev * hdev)766 static u32 get_configurable_phys(struct hci_dev *hdev)
767 {
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
770 }
771
/* Build the bitmask of settings this controller can support, based on
 * its LMP/LE capabilities and quirks.  Reported via Read Info.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs SSP and the
			 * CONFIG_BT_HS build option.
			 */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration needs either external config support or a way
	 * to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
823
/* Build the bitmask of settings currently in effect, derived from the
 * device flags.  Reported via Read Info and New Settings events.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
894
/* Find a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
899
/* Find a pending mgmt command for @opcode on the control channel that
 * also matches the given user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
906
mgmt_get_adv_discov_flags(struct hci_dev * hdev)907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
908 {
909 struct mgmt_pending_cmd *cmd;
910
911 /* If there's a pending mgmt command the flags will not yet have
912 * their final values, so check for this first.
913 */
914 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
915 if (cmd) {
916 struct mgmt_mode *cp = cmd->param;
917 if (cp->val == 0x01)
918 return LE_AD_GENERAL;
919 else if (cp->val == 0x02)
920 return LE_AD_LIMITED;
921 } else {
922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 return LE_AD_LIMITED;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 return LE_AD_GENERAL;
926 }
927
928 return 0;
929 }
930
mgmt_get_connectable(struct hci_dev * hdev)931 bool mgmt_get_connectable(struct hci_dev *hdev)
932 {
933 struct mgmt_pending_cmd *cmd;
934
935 /* If there's a pending mgmt command the flag will not yet have
936 * it's final value, so check for this first.
937 */
938 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
939 if (cmd) {
940 struct mgmt_mode *cp = cmd->param;
941
942 return cp->val;
943 }
944
945 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
946 }
947
/* Delayed-work handler: once the service cache period ends, push the
 * current EIR data and device class to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Atomically consume the flag; bail out if it was already clear. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
968
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA expired and, if advertising, restart it so a new RPA is used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing more to do unless we are currently advertising. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
993
/* One-time per-device setup when mgmt first touches @hdev.  The atomic
 * test-and-set of HCI_MGMT makes repeat calls no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1009
/* Handle MGMT_OP_READ_INFO: reply with the controller's address,
 * version, manufacturer, supported/current settings, class and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1039
/* Fill @eir with the EIR fields advertised via the extended info
 * reply/event: class of device (BR/EDR only), appearance (LE only)
 * and both local names. Returns the number of bytes written.
 *
 * NOTE(review): no bound on @eir is taken here; callers are expected
 * to supply a buffer large enough for all fields (both current
 * callers use a 512-byte buffer).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1063
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with names/class/appearance packed as EIR data in a variable-length
 * reply. Also switches this socket over to the extended info event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* buf is an array, so pass it directly (consistent with
	 * ext_info_changed()); &buf and buf denote the same address.
	 */
	memset(buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1103
/* Broadcast the Extended Controller Information Changed event to all
 * sockets that opted into extended info events, except @skip. The
 * EIR payload is rebuilt from the current device state.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1119
/* Complete @opcode on @sk with the controller's current settings
 * bitmask as the reply payload (common success path for the various
 * SET_* commands).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1127
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, schedule the power-off work immediately instead
 * of waiting for the delayed timeout. @status is only logged.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1137
/* Emit the MGMT Advertising Added event for advertising instance
 * @instance, passing @sk through to mgmt_event().
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1146
/* Emit the MGMT Advertising Removed event for advertising instance
 * @instance, passing @sk through to mgmt_event().
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1156
cancel_adv_timeout(struct hci_dev * hdev)1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 {
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
1162 }
1163 }
1164
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, remove advertising instances,
 * disable advertising, stop discovery and abort every connection.
 * Returns the hci_req_run() result (-ENODATA if nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	/* Only mark discovery as stopping once the request is in
	 * flight; the completion path finishes the transition.
	 */
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1198
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 * The actual transition runs asynchronously on req_workqueue; the
 * command stays pending until the power state change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be pending at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer with the current
	 * settings instead of starting a transition.
	 */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1253
/* Broadcast the New Settings event with the controller's current
 * settings bitmask to all settings-event subscribers except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1261
/* Public wrapper: broadcast New Settings to every subscriber (no
 * socket skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1266
/* Context for mgmt_pending_foreach() callbacks: collects the socket
 * of the first matched command (a reference is taken in settings_rsp)
 * and carries the device plus a status for the responses.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1272
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, and remember (and hold) the first responder's socket in
 * the cmd_lookup so the caller can skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
1284
/* mgmt_pending_foreach() callback: fail @cmd with the status code
 * pointed to by @data.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}
1291
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1292 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1293 {
1294 if (cmd->cmd_complete) {
1295 u8 *status = data;
1296
1297 cmd->cmd_complete(cmd, *status);
1298 return;
1299 }
1300
1301 cmd_status_rsp(cmd, data);
1302 }
1303
/* Default cmd_complete handler: echo the command's stored parameters
 * back as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1309
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1315
mgmt_bredr_support(struct hci_dev * hdev)1316 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1317 {
1318 if (!lmp_bredr_capable(hdev))
1319 return MGMT_STATUS_NOT_SUPPORTED;
1320 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1321 return MGMT_STATUS_REJECTED;
1322 else
1323 return MGMT_STATUS_SUCCESS;
1324 }
1325
mgmt_le_support(struct hci_dev * hdev)1326 static u8 mgmt_le_support(struct hci_dev *hdev)
1327 {
1328 if (!lmp_le_capable(hdev))
1329 return MGMT_STATUS_NOT_SUPPORTED;
1330 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1331 return MGMT_STATUS_REJECTED;
1332 else
1333 return MGMT_STATUS_SUCCESS;
1334 }
1335
/* Completion path for the Set Discoverable HCI work: on success, arm
 * the discoverable timeout (if one was requested) and notify; on
 * failure, fail the pending command and drop limited discoverability.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* The timeout was stashed in hdev->discov_timeout by
	 * set_discoverable(); arming it happens here, only once the
	 * controller actually became discoverable.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1370
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val is 0x00 (off),
 * 0x01 (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). When powered, the HCI side is driven
 * asynchronously via the discoverable_update work and the command
 * completes in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running controller to be armed against */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag and report settings, no
	 * HCI traffic.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1500
/* Completion path for the Set Connectable HCI work: on success reply
 * with current settings and broadcast New Settings; on failure fail
 * the pending command. Either way the pending entry is removed.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1528
/* Flag-only connectable update, used when the controller is powered
 * off: adjust HCI_CONNECTABLE (clearing HCI_DISCOVERABLE when going
 * non-connectable), answer the command, and on a real change refresh
 * scan state and broadcast New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1557
/* Handler for MGMT_OP_SET_CONNECTABLE. Powered-off devices take the
 * flag-only path; powered devices queue the connectable_update work
 * and complete later in mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Going non-connectable also ends discoverability,
		 * so stop any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1614
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * This is a pure flag change; the only HCI-affecting side effect is
 * a discoverable refresh in limited-privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1657
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security. When powered, sends HCI Write Auth Enable and
 * leaves the command pending for the HCI completion; when powered
 * off, only the HCI_LINK_SECURITY flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only path, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1726
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * When powered, issues HCI Write SSP Mode (and clears SSP debug mode
 * first if it was on) and leaves the command pending; when powered
 * off, only toggles flags. Disabling SSP also disables High Speed,
 * which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only path */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Dropping SSP drags HS down with it; report a
			 * change if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Debug keys only make sense with SSP on; turn debug mode off
	 * first when SSP is being disabled (best effort, not checked).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1807
/* Handler for MGMT_OP_SET_HS: toggle the High Speed (AMP) setting.
 * HS is a pure host-side flag here (no HCI command), but it requires
 * CONFIG_BT_HS, BR/EDR, SSP capability and SSP being enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could flip the SSP precondition under us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1868
/* HCI request completion callback for set_le(): answer all pending
 * SET_LE commands (with failure status, or with settings + New
 * Settings broadcast on success) and, when LE ended up enabled,
 * refresh the default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);

	/* settings_rsp stored (and held) the first responder's socket
	 * in match.sk so it can be skipped by the broadcast.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1915
/* Handler for MGMT_OP_SET_LE: enable/disable Low Energy support.
 * When a real controller change is needed, sends HCI Write LE Host
 * Supported (plus advertising teardown on disable) and completes in
 * le_enable_complete(); otherwise only host flags are toggled.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or controller already in the requested host-LE
	 * state: flag-only path, no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Tear down any active advertising before turning the
		 * LE host support off.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2022
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 *
 * Returns true if any UUID/class/power command is still pending.
 * Caller must hold the hdev lock (mgmt_pending is walked unlocked
 * here) -- all current callers do.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2045
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. 16- and 32-bit UUIDs are shorthand for
 * UUIDs whose trailing 12 bytes match this base (see get_uuid_size).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2050
get_uuid_size(const u8 * uuid)2051 static u8 get_uuid_size(const u8 *uuid)
2052 {
2053 u32 val;
2054
2055 if (memcmp(uuid, bluetooth_base_uuid, 12))
2056 return 128;
2057
2058 val = get_unaligned_le32(&uuid[12]);
2059 if (val > 0xffff)
2060 return 32;
2061
2062 return 16;
2063 }
2064
/* Shared completion helper for class-of-device affecting commands
 * (Add/Remove UUID, Set Dev Class): complete the pending @mgmt_op
 * with the current dev_class as payload and drop the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2083
/* HCI request completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2090
/* Handler for MGMT_OP_ADD_UUID: record a service UUID, then update
 * the controller's class of device and EIR data. If no HCI commands
 * were needed (-ENODATA) the command completes immediately; otherwise
 * it stays pending until add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send to the controller: the UUID list
		 * was still updated, so report success right away.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2148
enable_service_cache(struct hci_dev * hdev)2149 static bool enable_service_cache(struct hci_dev *hdev)
2150 {
2151 if (!hdev_is_powered(hdev))
2152 return false;
2153
2154 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2155 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2156 CACHE_TIMEOUT);
2157 return true;
2158 }
2159
2160 return false;
2161 }
2162
/* HCI request completion callback for MGMT_OP_REMOVE_UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2169
/* Handle MGMT_OP_REMOVE_UUID: remove one service UUID, or all of them
 * when the all-zero wildcard UUID is supplied, then refresh the device
 * class and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard: clear every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed just now, the HCI
		 * update is deferred to the cache timeout, so the mgmt
		 * command can be completed immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant since matching entries are deleted while iterating */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, complete immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2248
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2255
/* Handle MGMT_OP_SET_DEV_CLASS: update the major/minor Class of Device
 * and push the change to a BR/EDR capable controller.
 *
 * The low two bits of minor and the high three bits of major are
 * reserved format/field bits and must be zero in the command.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values that set reserved bits */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just store the values; they are written to
	 * the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* NOTE(review): hdev->lock is dropped around the
		 * synchronous cancel, presumably because the service
		 * cache work itself takes hdev->lock — confirm before
		 * reordering anything here.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: class unchanged on the controller, complete
		 * the mgmt command immediately.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2326
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the BR/EDR link key store with
 * the list supplied by userspace (typically at daemon startup).
 *
 * All parameters are validated before any existing state is touched so
 * a malformed request cannot leave a half-cleared key store. Blocked
 * keys and debug combination keys are skipped when loading.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count whose payload still fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type up front so the store is only cleared
	 * for a fully valid request.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only when the debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2416
/* Broadcast a Device Unpaired mgmt event for @bdaddr/@addr_type to all
 * mgmt listeners except @skip_sk (the socket that initiated the
 * unpairing and already got a command response).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2428
/* Handle MGMT_OP_UNPAIR_DEVICE: delete all pairing material (link key
 * for BR/EDR, SMP LTK/IRK for LE) for a device and optionally terminate
 * an existing connection to it.
 *
 * When cp->disconnect is set and a connection exists, the command stays
 * pending until the disconnect completes; otherwise it is completed
 * immediately and a Device Unpaired event is sent to other sockets.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was never
			 * paired on BR/EDR.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2556
/* Handle MGMT_OP_DISCONNECT: terminate the BR/EDR or LE connection to
 * the given address. The command stays pending until the disconnect
 * completes; only one disconnect may be pending at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect may be in flight at any time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2622
/* Map an HCI link type plus address type to the mgmt BDADDR_* value. */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
2641
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections. SCO/eSCO links are excluded from the reply,
 * which is why the response length is recalculated from the final count
 * rather than from the initial upper bound.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Upper bound on the number of entries; SCO/eSCO links are
	 * filtered out below, so the final count may be smaller.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		/* Skip SCO/eSCO links before filling the slot instead
		 * of writing the entry and letting the next iteration
		 * overwrite it.
		 */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2695
/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY and send the matching
 * HCI PIN Code Negative Reply to the controller. The pending command
 * is removed again if the HCI send fails.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command carries only the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2716
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 *
 * If the connection demands high security, only a 16-byte PIN is
 * acceptable; anything shorter is answered with an automatic negative
 * reply and the mgmt command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2778
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * subsequent pairings. Takes effect immediately; no HCI traffic needed.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values beyond the highest SMP-defined IO capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2801
find_pairing(struct hci_conn * conn)2802 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2803 {
2804 struct hci_dev *hdev = conn->hdev;
2805 struct mgmt_pending_cmd *cmd;
2806
2807 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2808 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2809 continue;
2810
2811 if (cmd->user_data != conn)
2812 continue;
2813
2814 return cmd;
2815 }
2816
2817 return NULL;
2818 }
2819
/* Complete a pending MGMT_OP_PAIR_DEVICE command.
 *
 * Sends the final response, detaches the pairing callbacks from the
 * connection and releases both references taken when the command was
 * set up (the hci_conn_drop() pairing with the connect attempt and the
 * hci_conn_put() pairing with cmd->user_data's hci_conn_get()).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2848
/* Called by the SMP layer when pairing on @conn finished; completes a
 * matching pending MGMT_OP_PAIR_DEVICE command, if any.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2860
/* Connection callback used for BR/EDR pairings: any connect, security
 * or disconnect event finishes the pending pair command with the
 * translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2876
/* Connection callback used for LE pairings.
 *
 * Unlike pairing_complete_cb(), a zero status is ignored here: for LE,
 * merely connecting is no proof that pairing finished — success is
 * reported separately via mgmt_smp_complete(). Only failures complete
 * the pending command from this callback.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2895
/* Handle MGMT_OP_PAIR_DEVICE: establish a connection to the given
 * address (ACL for BR/EDR, LE scan+connect otherwise) and initiate
 * pairing over it.
 *
 * The command stays pending until pairing finishes; completion is
 * driven by the connection callbacks installed below (or immediately,
 * if the link is already up and secure enough).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* An already-installed callback means another pairing owns this
	 * connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3030
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending pair-device
 * command for the given address, remove any keys created so far and
 * tear down the link if it only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3087
/* Shared handler for all user pairing responses (PIN/confirm/passkey,
 * positive and negative).
 *
 * For LE addresses the response is routed to the SMP layer and the mgmt
 * command completes immediately. For BR/EDR the response is forwarded
 * as the HCI command @hci_op and the mgmt command @mgmt_op stays
 * pending until the controller answers. @passkey is only used when
 * @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE responses are handled entirely by SMP; no HCI command needed */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3158
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common pairing-response path. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3170
/* Handle MGMT_OP_USER_CONFIRM_REPLY via the common pairing-response path.
 * The explicit length check rejects trailing bytes in the command.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3186
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing-response
 * path.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3198
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common pairing-response
 * path, forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3210
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing-response
 * path.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3222
/* Expire the current advertising instance if it embeds data that just
 * changed (e.g. local name or appearance, selected by @flags), and
 * schedule the next instance so the stale data gets replaced.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Move on to the instance after the current one; if there is
	 * none, advertising simply stays cancelled.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3251
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME.
 *
 * Resolves the pending mgmt command with the HCI status.  On success,
 * advertising instances that carry the local name are expired so they
 * pick up the new name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3283
/* Handle the MGMT Set Local Name command.
 *
 * Stores the new (short) name and pushes it to the controller: BR/EDR
 * name/EIR when BR/EDR capable, LE scan response data when currently
 * advertising.  When powered off only the stored values are updated
 * and the command completes immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Powered: the response is deferred until set_name_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3353
/* Handle the MGMT Set Appearance command (LE only).
 *
 * Updates the stored appearance value; if advertising is active,
 * instances embedding the appearance are expired so the new value is
 * picked up.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3387
/* Handle the MGMT Get PHY Configuration command: report the
 * supported, selected and configurable PHYs of this controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3408
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3409 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3410 {
3411 struct mgmt_ev_phy_configuration_changed ev;
3412
3413 memset(&ev, 0, sizeof(ev));
3414
3415 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3416
3417 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3418 sizeof(ev), skip);
3419 }
3420
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Resolves the pending mgmt command with the HCI status and, on
 * success, broadcasts the PHY Configuration Changed event to the
 * other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone except the originating socket, which
		 * already got the command complete above.
		 */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3451
/* Handle the MGMT Set PHY Configuration command.
 *
 * BR/EDR PHY selections are applied locally by recomputing the ACL
 * packet type mask (hdev->pkt_type); LE PHY selections are sent to the
 * controller via HCI_OP_LE_SET_DEFAULT_PHY, with the final response
 * deferred to set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is an error */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: complete immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selections onto the ACL packet type mask.
	 * 1M (basic rate) slot selections enable the DHx/DMx bits;
	 * 2M/3M (EDR) selections work the other way round: the HCI bits
	 * *disable* the corresponding EDR packet types, so they are
	 * cleared when the PHY is selected and set when it is not.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed there is no HCI command to send;
	 * report the change (if any) and complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for the
	 * respective direction.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3606
/* Handle the MGMT Set Blocked Keys command.
 *
 * Replaces the device's blocked-key list with the keys supplied in the
 * command.  The reply status is MGMT_STATUS_SUCCESS, or
 * MGMT_STATUS_NO_RESOURCES if an allocation failed part-way (already
 * added entries are kept).
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Iterate using the CPU-endian key_count computed above; the raw
	 * __le16 keys->key_count would yield the wrong count on
	 * big-endian hosts.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
3655
/* Handle the MGMT Set Wideband Speech command.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED setting.  Only supported
 * when the driver declares HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and
 * the value cannot be changed while the controller is powered.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is a valid parameter */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the flag may only be "changed" to its current
	 * value; an actual change is rejected.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3711
/* Handle the MGMT Read Security Info command.
 *
 * Builds a TLV (EIR-formatted) list of security capabilities:
 *   0x01 - capability flags (one octet)
 *   0x02 - max encryption key size (BR/EDR), when readable
 *   0x03 - max encryption key size (LE/SMP)
 *
 * buf[16] must fit sizeof(*rp) plus the three entries appended below —
 * NOTE(review): re-check the sizing if more entries are ever added.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3760
/* Experimental feature UUIDs.  The byte arrays store each UUID in
 * reverse (little-endian) order relative to the string form given in
 * the comment above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3780
/* Handle the MGMT Read Experimental Features Information command.
 *
 * May be invoked with hdev == NULL (non-controller index); in that
 * case only global features (debug) are reported.  Each feature entry
 * is a 16-byte UUID plus a 32-bit flags word (20 bytes).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0) = enabled; requires valid LE states plus the
		 * simultaneous central/peripheral LE state combination.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0) = enabled, BIT(1) = supported settings change */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3839
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * feature to sockets that opted into these events, except @skip.
 * BIT(1) (supported settings changed) is always reported.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? (BIT(0) | BIT(1)) : BIT(1);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3854
3855 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast an Experimental Feature Changed event for the debug
 * feature to sockets that opted into these events, except @skip.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3868 #endif
3869
/* Handle the MGMT Set Experimental Feature command.
 *
 * Recognized UUIDs:
 *  - all-zero (ZERO_KEY): reset all experimental features to off
 *  - debug_uuid (CONFIG_BT_FEATURE_DEBUG): toggle debug output; must
 *    be sent to the non-controller index
 *  - rpa_resolution_uuid: toggle LL privacy; must be sent to a
 *    controller index while powered off
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* An all-zero UUID requests disabling every feature */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4020
4021 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4022
/* Handle the MGMT Get Device Flags command.
 *
 * Looks up the device in the BR/EDR accept list or the LE connection
 * parameters and reports its current and supported flags.  Replies
 * with MGMT_STATUS_INVALID_PARAMS if the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing "\n": bt_dev_dbg() appends the newline itself
	 * (matches the sibling set_device_flags() message).
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
4072
/* Broadcast a Device Flags Changed event to all mgmt sockets except
 * the originating socket @sk.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4086
/* Handle the MGMT Set Device Flags command.
 *
 * Validates the requested flags against the supported mask, stores
 * them on the matching BR/EDR accept-list entry or LE connection
 * parameters, and broadcasts Device Flags Changed on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already CPU endian here; converting it a
	 * second time would print a byte-swapped value on big-endian.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* Reject any flag outside the supported mask */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

done:
	hci_dev_unlock(hdev);

	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4144
/* Broadcast an Advertisement Monitor Added event for @handle to all
 * mgmt sockets except the originating socket @sk.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4154
/* Broadcast an Advertisement Monitor Removed event for @handle to all
 * mgmt sockets except the originating socket @sk.
 *
 * Use the Removed event structure, not the Added one — the original
 * declared struct mgmt_ev_adv_monitor_added for the REMOVED event.
 */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
4164
/* Handle the MGMT Read Advertisement Monitor Features command.
 *
 * Reports supported monitor features, limits and the handles of all
 * currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): no bound check on num_handles here — assumes
	 * monitor registration caps the idr at
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries; confirm, otherwise
	 * handles[] could overflow.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4213
/* Handle the MGMT Add Advertisement Patterns Monitor command.
 *
 * Validates and copies the supplied patterns into a newly allocated
 * monitor, registers it with the core, and replies with the assigned
 * monitor handle.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
	__u8 cp_ofst = 0, cp_len = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	/* NOTE(review): on this path m is still NULL when jumping to
	 * 'failed', and on the invalid-params paths err holds the
	 * return of mgmt_cmd_status() — assumes hci_free_adv_monitor()
	 * accepts NULL and callers treat that err correctly; confirm.
	 */
	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		/* Cap the total number of patterns per monitor */
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		/* Offset + length must stay within one AD structure */
		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	/* On success the core owns the monitor and assigns m->handle */
	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	/* Only announce the monitor if registration actually added it */
	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
		mgmt_adv_monitor_added(sk, hdev, m->handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	hci_free_adv_monitor(m);
	return err;
}
4315
/* MGMT_OP_REMOVE_ADV_MONITOR: delete the advertisement monitor with
 * the given handle and echo the handle back on success.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int prev_adv_monitors_cnt;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);
	/* Snapshot the count so the "removed" event is only sent when a
	 * monitor was actually deleted.
	 */
	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	/* NOTE(review): errors other than -ENOENT fall through and are
	 * answered with MGMT_STATUS_SUCCESS below — confirm this is
	 * intentional.
	 */
	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	/* Echo the handle back in wire (little-endian) order unchanged. */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4353
/* HCI request completion callback for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates either a Read Local OOB Data or a Read Local OOB Extended
 * Data HCI response into the mgmt reply. For the non-extended variant
 * only the P-192 hash/randomizer fields are sent, so the reply is
 * truncated by the size of the P-256 fields.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* The pending command may already have been cancelled. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 data available: shrink the reply accordingly. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4412
/* MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local OOB
 * pairing data. The reply is delivered asynchronously from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 fail_status = 0;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Preconditions, checked in priority order. */
	if (!hdev_is_powered(hdev))
		fail_status = MGMT_STATUS_NOT_POWERED;
	else if (!lmp_ssp_capable(hdev))
		fail_status = MGMT_STATUS_NOT_SUPPORTED;
	else if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev))
		fail_status = MGMT_STATUS_BUSY;

	if (fail_status) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_READ_LOCAL_OOB_DATA,
				      fail_status);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Extended OOB data (P-256) requires BR/EDR Secure Connections. */
	hci_req_add(&req, bredr_sc_enabled(hdev) ?
			  HCI_OP_READ_LOCAL_OOB_EXT_DATA :
			  HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4463
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store OOB pairing data received from a
 * remote device.
 *
 * Two payload sizes are accepted: the legacy form carrying only P-192
 * hash/randomizer, and the extended form carrying both P-192 and P-256
 * values. All-zero key material is treated as "not provided".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 only, valid for BR/EDR addresses. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known payload size: malformed command. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4571
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data for a
 * single BR/EDR address, or all of it when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 rp_status = MGMT_STATUS_SUCCESS;
	int ret;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only BR/EDR OOB data is stored; reject other address types. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* BDADDR_ANY acts as a wildcard: clear every entry. */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		rp_status = MGMT_STATUS_INVALID_PARAMS;
	}

	ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				rp_status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return ret;
}
4608
/* Completion hook for the discovery-start request: finish whichever
 * start-discovery variant is pending and wake a suspend flow that was
 * waiting for discovery to resume.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 start_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* At most one of the start variants can be pending. */
	for (i = 0; i < ARRAY_SIZE(start_ops) && !cmd; i++)
		cmd = pending_find(start_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4638
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)4639 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4640 uint8_t *mgmt_status)
4641 {
4642 switch (type) {
4643 case DISCOV_TYPE_LE:
4644 *mgmt_status = mgmt_le_support(hdev);
4645 if (*mgmt_status)
4646 return false;
4647 break;
4648 case DISCOV_TYPE_INTERLEAVED:
4649 *mgmt_status = mgmt_le_support(hdev);
4650 if (*mgmt_status)
4651 return false;
4652 fallthrough;
4653 case DISCOV_TYPE_BREDR:
4654 *mgmt_status = mgmt_bredr_support(hdev);
4655 if (*mgmt_status)
4656 return false;
4657 break;
4658 default:
4659 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4660 return false;
4661 }
4662
4663 return true;
4664 }
4665
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY.
 *
 * Validates adapter state and discovery type, queues a pending command
 * and kicks the discovery update work; the reply is sent from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual scan is started asynchronously by the work item. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4733
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared helper. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4740
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared
 * helper; the opcode makes the helper set hdev->discovery.limited.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4748
/* Pending-command completion for service discovery: reply with only
 * the first parameter byte (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	struct sock *sk = cmd->sk;

	return mgmt_cmd_complete(sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, 1);
}
4755
/* MGMT_OP_START_SERVICE_DISCOVERY: start discovery filtered by RSSI
 * and an optional list of 128-bit service UUIDs.
 *
 * The command payload is variable length: sizeof(*cp) followed by
 * uuid_count 16-byte UUIDs; both the count and the resulting length
 * are validated before any allocation.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* hdev->discovery owns this copy; it is released by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4856
/* Completion hook for the discovery-stop request: finish a pending
 * MGMT_OP_STOP_DISCOVERY and wake a suspend flow waiting on the stop.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *pending;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4879
/* MGMT_OP_STOP_DISCOVERY: stop an active discovery of the given type.
 * The reply is sent asynchronously from mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *cp = data;
	struct mgmt_pending_cmd *pending;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		/* Nothing to stop. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &cp->type,
					sizeof(cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != cp->type) {
		/* The caller must name the discovery type it started. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->type, sizeof(cp->type));
		goto unlock;
	}

	pending = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data,
				   len);
	if (!pending) {
		err = -ENOMEM;
		goto unlock;
	}

	pending->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4921
/* MGMT_OP_CONFIRM_NAME: tell the kernel whether the name of a
 * discovered device is already known, so it can skip or schedule
 * remote-name resolution for that inquiry-cache entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (cp->name_known) {
		/* Name already resolved: no need to keep it queued. */
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	} else {
		/* Re-queue the entry for name resolution. */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
4963
/* MGMT_OP_BLOCK_DEVICE: add an address to the reject list so incoming
 * connections from it are refused.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 rp_status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		rp_status = MGMT_STATUS_FAILED;
	} else {
		/* Announce the change to other mgmt listeners. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		rp_status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, rp_status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4999
/* MGMT_OP_UNBLOCK_DEVICE: remove an address from the reject list.
 * Mirrors block_device(); a miss maps to INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 rp_status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		rp_status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		/* Announce the change to other mgmt listeners. */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		rp_status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				rp_status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5035
/* MGMT_OP_SET_DEVICE_ID: store the Device ID record values and push
 * them into the extended inquiry response.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 src;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	src = __le16_to_cpu(cp->source);

	/* Valid assigners are 0x0001 (SIG) and 0x0002 (USB IF); 0x0000
	 * disables the Device ID record.
	 */
	if (src > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = src;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Refresh the EIR data so the new Device ID is advertised. */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5070
/* hci_req_run() callback used when re-enabling instance advertising;
 * only logs the outcome, no further action is needed.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5076
/* HCI request completion for MGMT_OP_SET_ADVERTISING.
 *
 * Synchronizes the HCI_ADVERTISING mgmt flag with the controller
 * state, answers all pending Set Advertising commands, wakes any
 * suspend flow waiting on the advertising pause/unpause, and — when
 * the global setting was just turned off — re-schedules
 * multi-instance advertising if instances are configured.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first configured instance when none is
	 * currently selected.
	 */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5150
/* MGMT_OP_SET_ADVERTISING: enable (0x01), enable-connectable (0x02) or
 * disable (0x00) LE advertising.
 *
 * When the adapter is powered off, an LE connection exists, or an
 * active LE scan is running, only the flags are toggled and the reply
 * is sent immediately; otherwise an HCI request is issued and the
 * reply comes from set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* A suspend flow has advertising paused; don't fight with it. */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast new settings only on an actual change. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5269
/* MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random address.
 * Only allowed while powered off; BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* A non-clearing address must not be BDADDR_NONE and, per the
	 * static-address format, must have its two most significant
	 * bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5313
/* MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window and,
 * when background scanning is active, restart it so the new values
 * take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values are limited to 0x0004-0x4000 (units of 0.625 ms)
	 * and the window must not exceed the interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 || window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5370
/* HCI request completion for MGMT_OP_SET_FAST_CONNECTABLE: commit the
 * flag once the controller accepted the page-scan parameters and
 * answer the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!pending)
		goto unlock;

	if (status) {
		mgmt_cmd_status(pending->sk, hdev->id,
				MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		struct mgmt_mode *mode = pending->param;

		if (mode->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(pending->sk, MGMT_OP_SET_FAST_CONNECTABLE,
				  hdev);
		new_settings(hdev, pending->sk);
	}

	mgmt_pending_remove(pending);

unlock:
	hci_dev_unlock(hdev);
}
5404
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * 1.2.  When the adapter is powered off only the stored flag is
 * flipped; when powered on the page scan settings are written through
 * an HCI request whose completion is handled by
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be pending */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested value already in effect: just acknowledge */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the stored setting changes; no HCI
	 * traffic is needed.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5469
/* HCI request completion callback for Set BR/EDR.
 *
 * set_bredr() optimistically sets HCI_BREDR_ENABLED before running the
 * request, so on failure this callback must clear the flag again before
 * reporting the error.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5501
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.
 *
 * Disabling BR/EDR while powered on is rejected, and re-enabling it is
 * rejected when a static address or Secure Connections is in use (see
 * the detailed comment below).  Powered-off changes only touch flags;
 * powered-on enable runs an HCI request completed by
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* This setting only makes sense when LE is also enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested value already in effect: just acknowledge */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense with BR/EDR present.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5613
/* HCI request completion callback for Set Secure Connections.
 *
 * Syncs HCI_SC_ENABLED/HCI_SC_ONLY with the mode the caller requested
 * (0x00 off, 0x01 SC enabled, 0x02 SC-only) and emits the settings
 * response plus New Settings event; failures are reported as a command
 * status without touching the flags.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5658
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * (0x00 disabled, 0x01 enabled, 0x02 SC-only mode).
 *
 * If the controller is unpowered, not SC capable, or BR/EDR is off,
 * only the host flags are updated; otherwise Write Secure Connections
 * Host Support is sent and sc_enable_complete() finishes the job.
 *
 * Note the "failed" label is the common unlock/return path and is also
 * reached on success.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC is possible either via a capable controller or LE-only via
	 * the host stack (SMP).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR capable setup SC requires SSP to be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command can or needs to be sent */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled and SC-only state already match: acknowledge */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5746
/* Handler for MGMT_OP_SET_DEBUG_KEYS (0x00 off, 0x01 keep debug keys,
 * 0x02 keep and actively use debug keys).
 *
 * HCI_KEEP_DEBUG_KEYS tracks whether debug keys are stored at all;
 * HCI_USE_DEBUG_KEYS (value 0x02) additionally turns on SSP debug mode
 * in the controller when powered and SSP is enabled.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the controller's SSP debug mode with the new setting */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5793
/* Handler for MGMT_OP_SET_PRIVACY: configure LE privacy mode and the
 * local Identity Resolving Key (0x00 off, 0x01 privacy, 0x02 limited
 * privacy).
 *
 * Only allowed while the adapter is powered off, since the IRK and
 * privacy mode affect the addresses used for subsequent operations.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA from the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5850
irk_is_valid(struct mgmt_irk_info * irk)5851 static bool irk_is_valid(struct mgmt_irk_info *irk)
5852 {
5853 switch (irk->addr.type) {
5854 case BDADDR_LE_PUBLIC:
5855 return true;
5856
5857 case BDADDR_LE_RANDOM:
5858 /* Two most significant bits shall be set */
5859 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5860 return false;
5861 return true;
5862 }
5863
5864 return false;
5865 }
5866
/* Handler for MGMT_OP_LOAD_IRKS: replace the kernel's stored set of
 * Identity Resolving Keys with the list supplied by user space.
 *
 * The count and total payload length are validated first, then every
 * entry's address is validated, and only after that the existing IRK
 * list is cleared and repopulated.  Blocklisted IRKs are skipped with
 * a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before mutating any state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5942
ltk_is_valid(struct mgmt_ltk_info * key)5943 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5944 {
5945 if (key->initiator != 0x00 && key->initiator != 0x01)
5946 return false;
5947
5948 switch (key->addr.type) {
5949 case BDADDR_LE_PUBLIC:
5950 return true;
5951
5952 case BDADDR_LE_RANDOM:
5953 /* Two most significant bits shall be set */
5954 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5955 return false;
5956 return true;
5957 }
5958
5959 return false;
5960 }
5961
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's stored
 * SMP Long Term Keys with the list supplied by user space.
 *
 * Count/length are validated first, every entry is validated with
 * ltk_is_valid(), and only then is the existing LTK list cleared and
 * repopulated.  Blocklisted keys and debug keys are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys fall through to default and are
			 * deliberately skipped, i.e. never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6062
/* Complete a pending Get Connection Information command.
 *
 * On success the cached RSSI/TX-power values from the connection are
 * returned; otherwise the "invalid" sentinel values are used.  This
 * also releases the connection hold and reference taken by
 * get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original command parameters start with the address */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6089
/* HCI request completion callback for the Read RSSI / Read TX Power
 * request issued by get_conn_info().  Looks up the affected connection
 * from the last sent command's handle and completes the matching
 * pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was found in the sent data: nothing to match */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6143
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection.
 *
 * Values are served from the per-connection cache when it is still
 * fresh; otherwise an HCI request (Read RSSI and, when needed, Read
 * Transmit Power Level) refreshes them and the reply is deferred to
 * conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the link type matching the requested address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info per connection may be pending */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until the deferred completion; both
		 * are released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6264
/* Complete a pending Get Clock Information command.
 *
 * Fills in the local clock (and, when a connection was involved, the
 * piconet clock and accuracy) before replying, then drops the
 * connection hold and reference taken by get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The original command parameters start with the address */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->hdev->id);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6300
/* HCI request completion callback for the Read Clock request issued by
 * get_clock_info().  Resolves the connection (if the piconet clock was
 * read) and completes the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6332
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and,
 * optionally (when a non-zero BR/EDR address is given), the piconet
 * clock of an established connection.  The reply is deferred to
 * get_clock_info_complete()/clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects the piconet clock of that link */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the deferred completion; both
		 * are released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6408
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6409 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6410 {
6411 struct hci_conn *conn;
6412
6413 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6414 if (!conn)
6415 return false;
6416
6417 if (conn->dst_type != type)
6418 return false;
6419
6420 if (conn->state != BT_CONNECTED)
6421 return false;
6422
6423 return true;
6424 }
6425
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for the given address and
 * set its auto-connect policy, moving the entry onto the matching
 * pend_le_conns/pend_le_reports action list.  Returns 0 on success or
 * -EIO if the parameter entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current action list before re-filing */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6470
/* Emit the MGMT Device Added event for the given address, skipping the
 * socket that issued the originating command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.addr.type = type;
	ev.action = action;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6482
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action: 0x00 = scan and report (LE only),
 *             0x01 = allow connection (BR/EDR accept list / LE direct
 *                    connect),
 *             0x02 = auto-connect (LE only).
 * BR/EDR addresses only support action 0x01; LE addresses must be
 * identity addresses and are stored as connection parameters with the
 * matching auto_connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up flags of a possibly pre-existing entry so the
		 * Device Flags Changed event below reports the real state.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6580
/* Broadcast an MGMT Device Removed event to all mgmt sockets except @sk. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6591
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * With a specific address, removes that device from the BR/EDR accept
 * list or deletes its LE connection parameters. With BDADDR_ANY (and
 * address type 0), clears the entire accept list and all non-disabled
 * LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the accept list -> unknown device */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not created via Add Device,
		 * so refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect, but
			 * downgrade them so they are no longer auto-connected.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6720
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Loads a list of LE connection parameters supplied by userspace.
 * Entries with an invalid address type or invalid interval/latency/
 * timeout values are skipped (with an error log), not rejected as a
 * whole; the command itself only fails on malformed length or count.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on entries that fit in a maximum-size command */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop all previously disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Creates the entry if it does not exist yet */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6805
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Only allowed while powered off. If
 * the change moves the controller between the configured and
 * unconfigured states, the index is re-announced on the matching list.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* The test-and-modify helpers return the previous flag state, so
	 * "changed" is true only when the flag actually flipped.
	 */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag no longer matches reality, move the
	 * index between the configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: power on for initialization */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: treat as raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6861
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores a new public address for controllers that provide the
 * set_bdaddr driver callback. Only allowed while powered off. If the
 * address change makes an unconfigured controller fully configured, the
 * index is moved to the configured list and the controller is powered
 * on for initialization.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Remember whether the stored address actually changed */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: re-announce the index as
		 * configured and power on to apply the address.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6913
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req(). Builds the mgmt reply EIR from the
 * controller-provided hash/randomizer values and completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command.
 *
 * eir_len accounting: each 16-byte hash/rand becomes an 18-byte TLV
 * (2-byte header), and the 3-byte class of device becomes a 5-byte TLV.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with empty EIR */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy (non-SC) variant: only P-192 values available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant: P-256, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: omit the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* On success also broadcast the new OOB data to interested sockets */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7024
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7025 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7026 struct mgmt_cp_read_local_oob_ext_data *cp)
7027 {
7028 struct mgmt_pending_cmd *cmd;
7029 struct hci_request req;
7030 int err;
7031
7032 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7033 cp, sizeof(*cp));
7034 if (!cmd)
7035 return -ENOMEM;
7036
7037 hci_req_init(&req, hdev);
7038
7039 if (bredr_sc_enabled(hdev))
7040 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7041 else
7042 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7043
7044 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7045 if (err < 0) {
7046 mgmt_pending_remove(cmd);
7047 return err;
7048 }
7049
7050 return 0;
7051 }
7052
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * cp->type selects the transport: BIT(BDADDR_BREDR) for BR/EDR, or
 * BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM) for LE. For BR/EDR with
 * SSP enabled the data comes from the controller asynchronously (via
 * read_local_ssp_oob_req); otherwise the EIR is built inline here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and worst-case EIR size */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Hand off to the async HCI request; on success the
			 * reply is sent from the completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 = random (static),
		 * 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7208
get_supported_adv_flags(struct hci_dev * hdev)7209 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7210 {
7211 u32 flags = 0;
7212
7213 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7214 flags |= MGMT_ADV_FLAG_DISCOV;
7215 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7216 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7217 flags |= MGMT_ADV_FLAG_APPEARANCE;
7218 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7219
7220 /* In extended adv TX_POWER returned from Set Adv Param
7221 * will be always valid.
7222 */
7223 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7224 ext_adv_capable(hdev))
7225 flags |= MGMT_ADV_FLAG_TX_POWER;
7226
7227 if (ext_adv_capable(hdev)) {
7228 flags |= MGMT_ADV_FLAG_SEC_1M;
7229 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7230 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7231
7232 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7233 flags |= MGMT_ADV_FLAG_SEC_2M;
7234
7235 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7236 flags |= MGMT_ADV_FLAG_SEC_CODED;
7237 }
7238
7239 return flags;
7240 }
7241
/* MGMT_OP_READ_ADV_FEATURES handler.
 *
 * Reports the supported advertising flags, advertising/scan-response
 * data size limits and the identifiers of all registered advertising
 * instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * The status reply must echo the opcode of the issued command
	 * (MGMT_OP_READ_ADV_FEATURES); the previous code replied with
	 * MGMT_OP_SET_ADVERTISING by copy-paste mistake, leaving the
	 * client's Read Adv Features request unanswered.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per instance for the identifier list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7297
calculate_name_len(struct hci_dev * hdev)7298 static u8 calculate_name_len(struct hci_dev *hdev)
7299 {
7300 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7301
7302 return append_local_name(hdev, buf, 0);
7303 }
7304
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7305 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7306 bool is_adv_data)
7307 {
7308 u8 max_len = HCI_MAX_AD_LENGTH;
7309
7310 if (is_adv_data) {
7311 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7312 MGMT_ADV_FLAG_LIMITED_DISCOV |
7313 MGMT_ADV_FLAG_MANAGED_FLAGS))
7314 max_len -= 3;
7315
7316 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7317 max_len -= 3;
7318 } else {
7319 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7320 max_len -= calculate_name_len(hdev);
7321
7322 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7323 max_len -= 4;
7324 }
7325
7326 return max_len;
7327 }
7328
flags_managed(u32 adv_flags)7329 static bool flags_managed(u32 adv_flags)
7330 {
7331 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7332 MGMT_ADV_FLAG_LIMITED_DISCOV |
7333 MGMT_ADV_FLAG_MANAGED_FLAGS);
7334 }
7335
tx_power_managed(u32 adv_flags)7336 static bool tx_power_managed(u32 adv_flags)
7337 {
7338 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7339 }
7340
name_managed(u32 adv_flags)7341 static bool name_managed(u32 adv_flags)
7342 {
7343 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7344 }
7345
appearance_managed(u32 adv_flags)7346 static bool appearance_managed(u32 adv_flags)
7347 {
7348 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7349 }
7350
/* Validate userspace-supplied advertising or scan-response TLV data:
 * total length within the allowed maximum, every field fully contained
 * in the buffer, and no field that the kernel manages itself.
 *
 * Fix: the original inspected data[i + 1] (the field type byte) before
 * checking that the field fits in the buffer, so a truncated trailing
 * field caused a one-byte out-of-bounds read. The bounds check is now
 * performed first; the return value is unchanged for all inputs.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Check this before touching
		 * data[i + 1] so the type byte is never read out of bounds.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7395
/* Completion callback for the HCI request started by add_advertising().
 *
 * On success, clears the "pending" mark on newly added instances. On
 * failure, removes every still-pending instance (cancelling the adv
 * timeout if the current instance is among them) and notifies mgmt
 * listeners. Finally completes the pending MGMT_OP_ADD_ADVERTISING
 * command, if one is still queued.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Instance is now committed */
			adv_instance->pending = false;
			continue;
		}

		/* Roll back the instance that failed to start */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7447
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the requested flags, timeout and TLV payloads, registers
 * (or replaces) the advertising instance and, when possible, schedules
 * it on the controller. The command completes immediately when no HCI
 * traffic is needed, otherwise from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Command length must cover both variable-length payloads */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 * (phy_flags ^ (phy_flags & -phy_flags)) is non-zero when more than
	 * one secondary-PHY bit is set.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running controller to be enforced */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows adv data in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7600
/* Completion callback for the HCI request issued by remove_advertising().
 * Replies to the pending Remove Advertising command, if one is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_pending_cmd *pending;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means disabling advertising failed;
	 * the advertising instance itself has already been removed, so
	 * success is reported unconditionally.
	 */
	pending = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (pending) {
		cp = pending->param;
		rp.instance = cp->instance;

		mgmt_cmd_complete(pending->sk, pending->hdev->id,
				  pending->opcode, MGMT_STATUS_SUCCESS,
				  &rp, sizeof(rp));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);
}
7630
/* Handler for the Remove Advertising management command.
 *
 * Removes one advertising instance (or, when cp->instance is 0, skips the
 * existence check and clears via hci_req_clear_adv_instance()), queueing
 * any HCI traffic needed to disable advertising on the controller.
 *
 * Fix: the LL-privacy rejection previously replied with
 * MGMT_OP_SET_ADVERTISING; a Command Status must echo the opcode of the
 * command it responds to, otherwise userspace waiting on
 * MGMT_OP_REMOVE_ADVERTISING never sees the error.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support
	 * for advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising
	 * instance; instance 0 bypasses the lookup.
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another advertising or LE state change is in
	 * flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7716
/* Handler for the Get Advertising Size Information management command.
 * Reports the maximum advertising-data and scan-response lengths for the
 * requested instance and flag combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 requested, supported;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject outright when the controller has no LE support. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	requested = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags.
	 */
	supported = get_supported_adv_flags(hdev);
	if (requested & ~supported)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, requested, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, requested, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
7755
/* Dispatch table for incoming management commands.
 *
 * NOTE(review): the table appears to be indexed directly by command
 * opcode (entry 0x0000 is an explicit NULL placeholder), so entries must
 * stay in opcode order and none may be removed — confirm against the
 * opcode definitions in mgmt.h before reordering.
 *
 * Each entry gives the handler, the (minimum) expected parameter size,
 * and optional flags: HCI_MGMT_VAR_LEN (variable-length parameters),
 * HCI_MGMT_NO_HDEV (no controller index), HCI_MGMT_UNTRUSTED (allowed
 * for untrusted sockets), HCI_MGMT_UNCONFIGURED (valid while the
 * controller is unconfigured), HCI_MGMT_HDEV_OPTIONAL (index optional).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7876
mgmt_index_added(struct hci_dev * hdev)7877 void mgmt_index_added(struct hci_dev *hdev)
7878 {
7879 struct mgmt_ev_ext_index ev;
7880
7881 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7882 return;
7883
7884 switch (hdev->dev_type) {
7885 case HCI_PRIMARY:
7886 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7887 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7888 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7889 ev.type = 0x01;
7890 } else {
7891 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7892 HCI_MGMT_INDEX_EVENTS);
7893 ev.type = 0x00;
7894 }
7895 break;
7896 case HCI_AMP:
7897 ev.type = 0x02;
7898 break;
7899 default:
7900 return;
7901 }
7902
7903 ev.bus = hdev->bus;
7904
7905 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7906 HCI_MGMT_EXT_INDEX_EVENTS);
7907 }
7908
mgmt_index_removed(struct hci_dev * hdev)7909 void mgmt_index_removed(struct hci_dev *hdev)
7910 {
7911 struct mgmt_ev_ext_index ev;
7912 u8 status = MGMT_STATUS_INVALID_INDEX;
7913
7914 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7915 return;
7916
7917 switch (hdev->dev_type) {
7918 case HCI_PRIMARY:
7919 mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &status);
7920
7921 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7922 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7923 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7924 ev.type = 0x01;
7925 } else {
7926 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7927 HCI_MGMT_INDEX_EVENTS);
7928 ev.type = 0x00;
7929 }
7930 break;
7931 case HCI_AMP:
7932 ev.type = 0x02;
7933 break;
7934 default:
7935 return;
7936 }
7937
7938 ev.bus = hdev->bus;
7939
7940 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7941 HCI_MGMT_EXT_INDEX_EVENTS);
7942 }
7943
7944 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)7945 static void restart_le_actions(struct hci_dev *hdev)
7946 {
7947 struct hci_conn_params *p;
7948
7949 list_for_each_entry(p, &hdev->le_conn_params, list) {
7950 /* Needed for AUTO_OFF case where might not "really"
7951 * have been powered off.
7952 */
7953 list_del_init(&p->action);
7954
7955 switch (p->auto_connect) {
7956 case HCI_AUTO_CONN_DIRECT:
7957 case HCI_AUTO_CONN_ALWAYS:
7958 list_add(&p->action, &hdev->pend_le_conns);
7959 break;
7960 case HCI_AUTO_CONN_REPORT:
7961 list_add(&p->action, &hdev->pend_le_reports);
7962 break;
7963 default:
7964 break;
7965 }
7966 }
7967 }
7968
mgmt_power_on(struct hci_dev * hdev,int err)7969 void mgmt_power_on(struct hci_dev *hdev, int err)
7970 {
7971 struct cmd_lookup match = { NULL, hdev };
7972
7973 bt_dev_dbg(hdev, "err %d", err);
7974
7975 hci_dev_lock(hdev);
7976
7977 if (!err) {
7978 restart_le_actions(hdev);
7979 hci_update_background_scan(hdev);
7980 }
7981
7982 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
7983 &match);
7984
7985 new_settings(hdev, match.sk);
7986
7987 if (match.sk)
7988 sock_put(match.sk);
7989
7990 hci_dev_unlock(hdev);
7991 }
7992
/* Clean up management state when the controller powers down: complete
 * pending Set Powered commands, fail every other pending command, report
 * the class of device as cleared, and broadcast New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Answer pending Set Powered commands with the new settings. */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail all remaining pending commands (opcode 0 presumably
	 * matches every opcode — same usage as in mgmt_index_removed()).
	 */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &status);

	/* If a non-zero class of device was set, tell listeners it is
	 * now cleared.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8027
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8028 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8029 {
8030 struct mgmt_pending_cmd *cmd;
8031 u8 status;
8032
8033 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8034 if (!cmd)
8035 return;
8036
8037 if (err == -ERFKILL)
8038 status = MGMT_STATUS_RFKILLED;
8039 else
8040 status = MGMT_STATUS_FAILED;
8041
8042 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8043
8044 mgmt_pending_remove(cmd);
8045 }
8046
/* Emit a New Link Key event; store_hint tells userspace whether the key
 * is worth persisting.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key evt;

	memset(&evt, 0, sizeof(evt));

	evt.store_hint = persistent;
	bacpy(&evt.key.addr.bdaddr, &key->bdaddr);
	evt.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	evt.key.type = key->type;
	evt.key.pin_len = key->pin_len;
	memcpy(evt.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &evt, sizeof(evt), NULL);
}
8063
mgmt_ltk_type(struct smp_ltk * ltk)8064 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8065 {
8066 switch (ltk->type) {
8067 case SMP_LTK:
8068 case SMP_LTK_RESPONDER:
8069 if (ltk->authenticated)
8070 return MGMT_LTK_AUTHENTICATED;
8071 return MGMT_LTK_UNAUTHENTICATED;
8072 case SMP_LTK_P256:
8073 if (ltk->authenticated)
8074 return MGMT_LTK_P256_AUTH;
8075 return MGMT_LTK_P256_UNAUTH;
8076 case SMP_LTK_P256_DEBUG:
8077 return MGMT_LTK_P256_DEBUG;
8078 }
8079
8080 return MGMT_LTK_UNAUTHENTICATED;
8081 }
8082
/* Emit a New Long Term Key event for a freshly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key evt;

	memset(&evt, 0, sizeof(evt));

	/* A device using a resolvable or non-resolvable random address
	 * without distributing an identity resolving key has no stable
	 * identity, so its LTK is not worth storing — the address will
	 * be different next time. Static random addresses (top two bits
	 * set, 0xc0) and public addresses are fine, since long term keys
	 * are internally mapped to the identity address.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		evt.store_hint = 0x00;
	else
		evt.store_hint = persistent;

	bacpy(&evt.key.addr.bdaddr, &key->bdaddr);
	evt.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	evt.key.type = mgmt_ltk_type(key);
	evt.key.enc_size = key->enc_size;
	evt.key.ediv = key->ediv;
	evt.key.rand = key->rand;

	if (key->type == SMP_LTK)
		evt.key.initiator = 1;

	/* Copy only the bytes covered by the encryption key size and
	 * zero the remainder of the value.
	 */
	memcpy(evt.key.val, key->val, key->enc_size);
	memset(evt.key.val + key->enc_size, 0,
	       sizeof(evt.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &evt, sizeof(evt), NULL);
}
8125
/* Emit a New Identity Resolving Key event to userspace. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk evt;

	memset(&evt, 0, sizeof(evt));

	evt.store_hint = persistent;
	bacpy(&evt.rpa, &irk->rpa);
	bacpy(&evt.irk.addr.bdaddr, &irk->bdaddr);
	evt.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(evt.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &evt, sizeof(evt), NULL);
}
8141
/* Emit a New Connection Signature Resolving Key event. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk evt;

	memset(&evt, 0, sizeof(evt));

	/* A device using a resolvable or non-resolvable random address
	 * without distributing an IRK has no stable identity, so its
	 * CSRK is not worth storing — the address will change next time.
	 * Static random (0xc0 top bits) and public addresses are fine.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		evt.store_hint = 0x00;
	else
		evt.store_hint = persistent;

	bacpy(&evt.key.addr.bdaddr, &csrk->bdaddr);
	evt.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	evt.key.type = csrk->type;
	memcpy(evt.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &evt, sizeof(evt), NULL);
}
8171
/* Emit a New Connection Parameter event for an identity address. */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param evt;

	/* Parameters are only reported for identity addresses. */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&evt, 0, sizeof(evt));
	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	evt.store_hint = store_hint;
	evt.min_interval = cpu_to_le16(min_interval);
	evt.max_interval = cpu_to_le16(max_interval);
	evt.latency = cpu_to_le16(latency);
	evt.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &evt, sizeof(evt), NULL);
}
8192
/* Emit a Device Connected event, including any EIR data (LE advertising
 * data or, for BR/EDR, the remote name and class of device).
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR data is assembled on the
	 * stack; assumes sizeof(*ev) + EIR payload fits in 512 bytes —
	 * TODO confirm callers bound le_adv_data_len and name_len.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Tell userspace whether the local side initiated the link. */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append a class of device that is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
8233
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)8234 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8235 {
8236 struct sock **sk = data;
8237
8238 cmd->cmd_complete(cmd, 0);
8239
8240 *sk = cmd->sk;
8241 sock_hold(*sk);
8242
8243 mgmt_pending_remove(cmd);
8244 }
8245
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)8246 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8247 {
8248 struct hci_dev *hdev = data;
8249 struct mgmt_cp_unpair_device *cp = cmd->param;
8250
8251 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8252
8253 cmd->cmd_complete(cmd, 0);
8254 }
8255
mgmt_powering_down(struct hci_dev * hdev)8256 bool mgmt_powering_down(struct hci_dev *hdev)
8257 {
8258 struct mgmt_pending_cmd *cmd;
8259 struct mgmt_mode *cp;
8260
8261 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8262 if (!cmd)
8263 return false;
8264
8265 cp = cmd->param;
8266 if (!cp->val)
8267 return true;
8268
8269 return false;
8270 }
8271
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that userspace was told about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command first; disconnect_rsp
	 * hands back a referenced socket (presumably used to avoid
	 * echoing the event to the command's own sender — confirm
	 * against mgmt_event()'s last argument semantics).
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, true, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true, unpair_device_rsp,
			     hdev);
}
8311
/* Handle a failed disconnect attempt: flush pending Unpair Device
 * commands and fail a matching pending Disconnect command.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the pending command if it targets this address. */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8337
/* Emit a Connect Failed event for an outgoing connection attempt. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed evt;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	evt.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &evt, sizeof(evt), NULL);
}
8357
/* Forward a controller PIN code request to userspace (BR/EDR only). */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request evt;

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = BDADDR_BREDR;
	evt.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &evt, sizeof(evt), NULL);
}
8368
/* Complete a pending PIN Code Reply command with the given HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8381
/* Complete a pending PIN Code Negative Reply command with the given
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8394
/* Ask userspace to confirm a numeric-comparison pairing value. */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	evt.confirm_hint = confirm_hint;
	evt.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &evt,
			  sizeof(evt), NULL);
}
8411
/* Ask userspace to provide a passkey for pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &evt,
			  sizeof(evt), NULL);
}
8425
/* Complete the pending user-pairing response command identified by
 * opcode, translating the HCI status. Returns -ENOENT when no such
 * command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8441
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_CONFIRM_REPLY);
}
8448
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8456
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_PASSKEY_REPLY);
}
8463
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type,
					  addr_type, status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8471
/* Notify userspace of the passkey being displayed/entered during
 * pairing.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	evt.passkey = __cpu_to_le32(passkey);
	evt.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &evt, sizeof(evt),
			  NULL);
}
8487
/* Emit an Authentication Failed event and complete any pairing command
 * pending on this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed evt;
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);

	bacpy(&evt.addr.bdaddr, &conn->dst);
	evt.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	evt.status = status;

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &evt, sizeof(evt),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8508
/* Completion handler for an authentication-enable change.  Mirrors the
 * controller's HCI_AUTH state into the HCI_LINK_SECURITY mgmt setting
 * and answers all pending MGMT_OP_SET_LINK_SECURITY commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail every pending command with the translated HCI error */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed is true only when the setting actually flipped; the
	 * test-and-set/clear helpers make this race-free.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);
}
8535
clear_eir(struct hci_request * req)8536 static void clear_eir(struct hci_request *req)
8537 {
8538 struct hci_dev *hdev = req->hdev;
8539 struct hci_cp_write_eir cp;
8540
8541 if (!lmp_ext_inq_capable(hdev))
8542 return;
8543
8544 memset(hdev->eir, 0, sizeof(hdev->eir));
8545
8546 memset(&cp, 0, sizeof(cp));
8547
8548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8549 }
8550
/* Completion handler for enabling/disabling Secure Simple Pairing.
 * Updates the SSP (and dependent High Speed) mgmt settings, answers all
 * pending MGMT_OP_SET_SSP commands and keeps the EIR data in sync with
 * the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed, undo the SSP flag if it was set.
		 * HS requires SSP, so it is cleared along with it and a
		 * New Settings event is emitted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP also disables HS; report a change if
		 * either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Sync EIR with the new SSP state: with SSP on, (optionally)
	 * enable debug-key mode and refresh the EIR data; with SSP off,
	 * clear the EIR data entirely.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8603
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8604 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8605 {
8606 struct cmd_lookup *match = data;
8607
8608 if (match->sk == NULL) {
8609 match->sk = cmd->sk;
8610 sock_hold(match->sk);
8611 }
8612 }
8613
/* Completion handler for a Class of Device change.  On success the new
 * class is broadcast to mgmt clients, skipping the socket (if any) that
 * has a related command pending.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	/* All commands that can trigger a class-of-device update */
	static const u16 ops[] = {
		MGMT_OP_SET_DEV_CLASS,
		MGMT_OP_ADD_UUID,
		MGMT_OP_REMOVE_UUID,
	};
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ops); i++)
		mgmt_pending_foreach(ops[i], hdev, false, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	/* sk_lookup took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);
}
8635
/* Completion handler for a local name change.  Broadcasts the new name
 * (plus the current short name) to mgmt clients, unless the change is
 * part of powering the adapter on.
 *
 * NOTE(review): @name is copied for HCI_MAX_NAME_LENGTH bytes — assumes
 * callers always pass a buffer at least that large.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending: the change came from elsewhere,
		 * so cache the name to keep hdev->dev_name in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the socket that issued the pending command, if any */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8663
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])8664 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8665 {
8666 int i;
8667
8668 for (i = 0; i < uuid_count; i++) {
8669 if (!memcmp(uuid, uuids[i], 16))
8670 return true;
8671 }
8672
8673 return false;
8674 }
8675
/* Walk an EIR/advertising data blob and return true if any advertised
 * UUID (16-, 32- or 128-bit) occurs in the @uuids filter list.  16- and
 * 32-bit UUIDs are expanded to 128-bit form via the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length excludes this byte */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the end of the blob */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* field type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs (little-endian in EIR) replace
			 * bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs replace bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the field including its length byte */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8730
restart_le_scan(struct hci_dev * hdev)8731 static void restart_le_scan(struct hci_dev *hdev)
8732 {
8733 /* If controller is not scanning we are done. */
8734 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8735 return;
8736
8737 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8738 hdev->discovery.scan_start +
8739 hdev->discovery.scan_duration))
8740 return;
8741
8742 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8743 DISCOV_LE_RESTART_DELAY);
8744 }
8745
is_filter_match(struct hci_dev * hdev,s8 rssi,u8 * eir,u16 eir_len,u8 * scan_rsp,u8 scan_rsp_len)8746 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8747 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8748 {
8749 /* If a RSSI threshold has been specified, and
8750 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8751 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8752 * is set, let it through for further processing, as we might need to
8753 * restart the scan.
8754 *
8755 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8756 * the results are also dropped.
8757 */
8758 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8759 (rssi == HCI_RSSI_INVALID ||
8760 (rssi < hdev->discovery.rssi &&
8761 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8762 return false;
8763
8764 if (hdev->discovery.uuid_count != 0) {
8765 /* If a list of UUIDs is provided in filter, results with no
8766 * matching UUID should be dropped.
8767 */
8768 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8769 hdev->discovery.uuids) &&
8770 !eir_has_uuids(scan_rsp, scan_rsp_len,
8771 hdev->discovery.uuid_count,
8772 hdev->discovery.uuids))
8773 return false;
8774 }
8775
8776 /* If duplicate filtering does not report RSSI changes, then restart
8777 * scanning to ensure updated result with updated RSSI values.
8778 */
8779 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8780 restart_le_scan(hdev);
8781
8782 /* Validate RSSI value against the RSSI threshold once more. */
8783 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8784 rssi < hdev->discovery.rssi)
8785 return false;
8786 }
8787
8788 return true;
8789 }
8790
/* Emit a Device Found event for a discovery or passive-scan result.
 * Applies the active discovery filters (service filter, limited
 * discoverable) and assembles EIR + optional Class of Device + scan
 * response data into a single event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* Bit 5 of the minor class octet is "limited" */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD field instead */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the CoD as an EIR field unless the data already has one */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8875
/* Emit a Device Found event carrying only the remote device's complete
 * name, as obtained from a remote name request.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	/* Event header plus one EIR field: 2 bytes of field header and
	 * up to HCI_MAX_NAME_LENGTH bytes of name.
	 */
	char buf[sizeof(struct mgmt_ev_device_found) + HCI_MAX_NAME_LENGTH + 2];
	struct mgmt_ev_device_found *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8898
/* Notify mgmt clients that discovery has started or stopped. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8911
/* Notify mgmt clients that the controller is suspending. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
8919
/* Notify mgmt clients that the controller resumed, including the wake
 * reason and the waking device's address (all-zero if none).
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
8935
/* The mgmt control channel: dispatches HCI_CHANNEL_CONTROL messages
 * through the mgmt_handlers table; mgmt_init_hdev is the per-hdev
 * initialization hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8942
/* Register the mgmt control channel.  Returns 0 or a negative errno. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8947
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8952