1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
44
/* Opcodes announced to trusted (privileged) sockets via the Read
 * Management Commands reply. Untrusted sockets get the reduced
 * mgmt_untrusted_commands table below instead.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126
/* Events announced to trusted sockets via the Read Management
 * Commands reply (events follow the command opcodes in the reply).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
169
/* Subset of opcodes (read-only information commands) announced to
 * untrusted sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
182
/* Subset of events announced to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
199
200 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
201
202 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
203 "\x00\x00\x00\x00\x00\x00\x00\x00"
204
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; the trailing comment on
 * each entry names the corresponding HCI error. Entries must stay in
 * HCI status order — mgmt_status() relies on the position. Codes
 * past the end of the table map to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
272
/* Convert an HCI status code to its MGMT equivalent.
 *
 * Status codes outside the conversion table map to
 * MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}
280
/* Send an index-related event on the control channel to all sockets
 * matching @flag. No socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
287
/* Send an event on the control channel restricted to sockets matching
 * @flag, optionally skipping @skip_sk (typically the socket that
 * originated the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
294
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
301
/* Translate a mgmt address type into the HCI LE address type.
 * Everything that is not public is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
309
/* Fill a mgmt_rp_read_version structure with the management interface
 * version and revision. Non-static: also used outside this file
 * within the Bluetooth subsystem.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
317
/* Handle MGMT_OP_READ_VERSION: reply with the interface version and
 * revision. Operates without a controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version ver;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&ver);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &ver, sizeof(ver));
}
330
/* Handle MGMT_OP_READ_COMMANDS: reply with the lists of supported
 * commands and events. Trusted sockets see the full tables, untrusted
 * sockets only the read-only subsets. The reply packs all command
 * opcodes first, followed by all event opcodes, as little-endian u16.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the tables once based on the socket's trust level */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events, back to back */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
382
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers.
 *
 * The device list is walked twice while holding hci_dev_list_lock:
 * once to size the allocation (an upper bound, since the second pass
 * applies stricter filters) and once to fill in the indexes.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC because the read lock is held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, skipping devices that are still
	 * being set up or are claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
442
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reports primary controllers that still have the HCI_UNCONFIGURED
 * flag set. Same two-pass pattern under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC because the read lock is held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes with the stricter filters applied */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
502
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and AMP
 * controller indexes plus a type byte per entry:
 *   0x00 = configured primary, 0x01 = unconfigured primary,
 *   0x02 = AMP controller.
 *
 * Calling this command also switches the socket over to extended
 * index events only (see the comment near the end).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because the read lock is held */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries with the stricter filters applied */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
576
is_configured(struct hci_dev * hdev)577 static bool is_configured(struct hci_dev *hdev)
578 {
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
581 return false;
582
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
586 return false;
587
588 return true;
589 }
590
get_missing_options(struct hci_dev * hdev)591 static __le32 get_missing_options(struct hci_dev *hdev)
592 {
593 u32 options = 0;
594
595 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
596 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
597 options |= MGMT_OPTION_EXTERNAL_CONFIG;
598
599 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
600 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
601 !bacmp(&hdev->public_addr, BDADDR_ANY))
602 options |= MGMT_OPTION_PUBLIC_ADDRESS;
603
604 return cpu_to_le32(options);
605 }
606
/* Emit a New Configuration Options event carrying the currently
 * missing options, skipping @skip (the socket that triggered the
 * change).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
614
/* Complete @opcode with the currently missing configuration options
 * as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
622
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer plus
 * the supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is only an option when the quirk is set */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address needs a set_bdaddr driver hook */
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
650
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits (2M and Coded PHY).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for BR/EDR controllers */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* EDR 3M rates only apply on top of 2M support */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is always available when LE is supported */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
702
/* Build the bitmask of currently selected PHYs.
 *
 * Note the inverted sense for EDR rates: an EDR packet-type bit that
 * is SET in hdev->pkt_type marks that rate as disabled, so a rate is
 * selected when its bit is clear. The BR 3/5-slot bits use the
 * normal (set = enabled) sense.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE TX/RX selection comes from the default PHY settings */
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
765
get_configurable_phys(struct hci_dev * hdev)766 static u32 get_configurable_phys(struct hci_dev *hdev)
767 {
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
770 }
771
/* Build the bitmask of settings this controller can support, based on
 * its LMP/LE capabilities and quirks. Used in the Read Controller
 * Information reply.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed needs SSP plus kernel HS support */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible with either the quirk or a driver
	 * hook for setting the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
823
/* Build the bitmask of currently active settings from the hdev flags.
 * Used in the Read Controller Information reply and in New Settings
 * events.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
894
/* Look up a pending mgmt command for @opcode on @hdev's control
 * channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
899
/* Look up a pending mgmt command for @opcode on @hdev's control
 * channel whose user data matches @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
906
mgmt_get_adv_discov_flags(struct hci_dev * hdev)907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
908 {
909 struct mgmt_pending_cmd *cmd;
910
911 /* If there's a pending mgmt command the flags will not yet have
912 * their final values, so check for this first.
913 */
914 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
915 if (cmd) {
916 struct mgmt_mode *cp = cmd->param;
917 if (cp->val == 0x01)
918 return LE_AD_GENERAL;
919 else if (cp->val == 0x02)
920 return LE_AD_LIMITED;
921 } else {
922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 return LE_AD_LIMITED;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 return LE_AD_GENERAL;
926 }
927
928 return 0;
929 }
930
mgmt_get_connectable(struct hci_dev * hdev)931 bool mgmt_get_connectable(struct hci_dev *hdev)
932 {
933 struct mgmt_pending_cmd *cmd;
934
935 /* If there's a pending mgmt command the flag will not yet have
936 * it's final value, so check for this first.
937 */
938 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
939 if (cmd) {
940 struct mgmt_mode *cp = cmd->param;
941
942 return cp->val;
943 }
944
945 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
946 }
947
/* Delayed work that pushes the cached EIR and class-of-device updates
 * to the controller once the service cache period ends.
 *
 * Only acts if the HCI_SERVICE_CACHE flag was still set (test and
 * clear is atomic, so concurrent invocations run the request once).
 * The request is built under hci_dev_lock but run after unlocking.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
968
/* Delayed work run when the Resolvable Private Address lifetime ends.
 *
 * Marks the RPA as expired and, if advertising is currently enabled,
 * restarts it so the controller gets a fresh RPA programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing more to do unless advertising is active */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
993
/* One-time mgmt initialization of a controller, performed when mgmt
 * first takes control of it. The atomic test-and-set of HCI_MGMT
 * guarantees the body runs only once per hdev.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1009
/* Handle MGMT_OP_READ_INFO: reply with the controller's address,
 * version, manufacturer, settings, class of device and names. The
 * hdev fields are snapshotted under hci_dev_lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info info;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&info, 0, sizeof(info));

	hci_dev_lock(hdev);

	bacpy(&info.bdaddr, &hdev->bdaddr);
	info.version = hdev->hci_ver;
	info.manufacturer = cpu_to_le16(hdev->manufacturer);

	info.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	info.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(info.dev_class, hdev->dev_class, 3);
	memcpy(info.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(info.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &info,
				 sizeof(info));
}
1039
append_eir_data_to_buf(struct hci_dev * hdev,u8 * eir)1040 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1041 {
1042 u16 eir_len = 0;
1043 size_t name_len;
1044
1045 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1046 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1047 hdev->dev_class, 3);
1048
1049 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1050 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1051 hdev->appearance);
1052
1053 name_len = strlen(hdev->dev_name);
1054 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1055 hdev->dev_name, name_len);
1056
1057 name_len = strlen(hdev->short_name);
1058 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1059 hdev->short_name, name_len);
1060
1061 return eir_len;
1062 }
1063
/* MGMT_OP_READ_EXT_INFO: like Read Controller Information but with the
 * variable data (class, appearance, names) encoded as EIR structures.
 * Calling this once switches the socket to extended info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	/* Variable-length tail: EIR data appended after the fixed header */
	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1103
/* Emit the Extended Controller Information Changed event to all mgmt
 * sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	/* Same EIR encoding as Read Extended Controller Information */
	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1119
/* Reply to @opcode with the controller's current settings bitmask,
 * encoded little-endian as the mgmt API requires.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1127
/* HCI request completion handler for clean_up_hci_state(): once all
 * connections are gone, power off the controller without waiting for
 * the power-off timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		/* Run the queued power-off work immediately */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1137
/* Notify mgmt listeners (except @sk) that advertising instance
 * @instance was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1146
/* Notify mgmt listeners (except @sk) that advertising instance
 * @instance was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1156
cancel_adv_timeout(struct hci_dev * hdev)1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 {
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
1162 }
1163 }
1164
/* Prepare the controller for power-off: disable page/inquiry scan,
 * remove advertising instances, stop advertising and discovery, and
 * abort every active connection. Returns the hci_req_run() result
 * (-ENODATA when nothing needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is enabled */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1198
/* MGMT_OP_SET_POWERED: power the controller up or down. Completion is
 * reported asynchronously by the power_on/power_off work; this function
 * only validates, records the pending command and queues the work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be pending at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			/* Nothing to clean up, so power off right away */
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1253
new_settings(struct hci_dev * hdev,struct sock * skip)1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1255 {
1256 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1257
1258 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1260 }
1261
/* Public wrapper: broadcast New Settings to every subscribed socket */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1266
/* Context passed through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1272
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings and remember the first socket in the cmd_lookup so
 * the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Hold a reference on the first responder's socket */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1288
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1296
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1298 {
1299 if (cmd->cmd_complete) {
1300 u8 *status = data;
1301
1302 cmd->cmd_complete(cmd, *status);
1303 mgmt_pending_remove(cmd);
1304
1305 return;
1306 }
1307
1308 cmd_status_rsp(cmd, data);
1309 }
1310
/* cmd_complete handler that echoes the command's full parameter blob */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1316
/* cmd_complete handler that echoes only the leading mgmt_addr_info
 * (assumes cmd->param begins with one -- true for address-based ops).
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1322
mgmt_bredr_support(struct hci_dev * hdev)1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1324 {
1325 if (!lmp_bredr_capable(hdev))
1326 return MGMT_STATUS_NOT_SUPPORTED;
1327 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 return MGMT_STATUS_REJECTED;
1329 else
1330 return MGMT_STATUS_SUCCESS;
1331 }
1332
mgmt_le_support(struct hci_dev * hdev)1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1334 {
1335 if (!lmp_le_capable(hdev))
1336 return MGMT_STATUS_NOT_SUPPORTED;
1337 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 return MGMT_STATUS_REJECTED;
1339 else
1340 return MGMT_STATUS_SUCCESS;
1341 }
1342
/* Completion handler for Set Discoverable: report the result to the
 * issuing socket and, on success, arm the discoverable timeout and
 * broadcast the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back limited discoverable on failure */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the auto-disable timer if a timeout was requested */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1377
/* MGMT_OP_SET_DISCOVERABLE: val 0x00 = off, 0x01 = general discoverable,
 * 0x02 = limited discoverable (requires a timeout). The actual HCI work
 * runs asynchronously via the discoverable_update work item; completion
 * is reported from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against concurrent discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1507
/* Completion handler for Set Connectable: report the result to the
 * issuing socket and broadcast the new settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1535
/* Powered-off path of Set Connectable: only flip the stored flags, send
 * the settings response and, when anything changed, refresh scanning
 * state and broadcast New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1564
/* MGMT_OP_SET_CONNECTABLE: enable/disable incoming connections. When
 * powered, the HCI work runs asynchronously via connectable_update and
 * completes in mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update the stored settings */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against concurrent discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable cancels any discoverable state */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1621
/* MGMT_OP_SET_BONDABLE: toggle whether pairing/bonding is accepted.
 * Purely a flag change; no HCI traffic, except that in limited privacy
 * mode a discoverable update is queued because the advertising address
 * may depend on the bondable setting.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1664
/* MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link-level security
 * (authentication). When powered, sends HCI Write Auth Enable; the
 * mgmt response is sent from the HCI command's completion path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires usable BR/EDR */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only record the flag and report settings */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1733
/* MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing. When powered,
 * sends HCI Write SSP Mode; disabling SSP also disables High Speed
 * since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only record flags and report settings */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also takes HS down with it */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1814
/* MGMT_OP_SET_HS: enable/disable High Speed (802.11 AMP) support.
 * Pure flag change -- no HCI traffic -- but requires CONFIG_BT_HS,
 * SSP capability and SSP currently enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with an in-flight SSP change */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875
/* HCI request completion handler for Set LE: answer all pending SET_LE
 * commands, broadcast the new settings and -- if LE ended up enabled --
 * refresh advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1922
/* MGMT_OP_SET_LE: enable/disable Low Energy support. When powered and
 * the host LE state actually changes, sends HCI Write LE Host Supported
 * and completes asynchronously in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* No HCI traffic needed: powered off, or host state already matches */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against concurrent LE/advertising changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE on the host */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2029
2030 /* This is a helper function to test for pending mgmt commands that can
2031 * cause CoD or EIR HCI commands. We can only allow one such pending
2032 * mgmt command at a time since otherwise we cannot easily track what
2033 * the current values are, will be, and based on that calculate if a new
2034 * HCI command needs to be sent and if yes with what value.
2035 */
pending_eir_or_class(struct hci_dev * hdev)2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2037 {
2038 struct mgmt_pending_cmd *cmd;
2039
2040 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 switch (cmd->opcode) {
2042 case MGMT_OP_ADD_UUID:
2043 case MGMT_OP_REMOVE_UUID:
2044 case MGMT_OP_SET_DEV_CLASS:
2045 case MGMT_OP_SET_POWERED:
2046 return true;
2047 }
2048 }
2049
2050 return false;
2051 }
2052
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs occupy the top four bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2057
get_uuid_size(const u8 * uuid)2058 static u8 get_uuid_size(const u8 *uuid)
2059 {
2060 u32 val;
2061
2062 if (memcmp(uuid, bluetooth_base_uuid, 12))
2063 return 128;
2064
2065 val = get_unaligned_le32(&uuid[12]);
2066 if (val > 0xffff)
2067 return 32;
2068
2069 return 16;
2070 }
2071
/* Complete a pending class-affecting command (@mgmt_op) by replying
 * with the translated status plus the current class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2090
/* HCI request completion handler for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2097
/* MGMT_OP_ADD_UUID: register a service UUID and refresh the class of
 * device and EIR data. If no HCI traffic is needed (-ENODATA), the
 * command completes immediately; otherwise it completes from
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI update needed; reply right away with the class */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2155
enable_service_cache(struct hci_dev * hdev)2156 static bool enable_service_cache(struct hci_dev *hdev)
2157 {
2158 if (!hdev_is_powered(hdev))
2159 return false;
2160
2161 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2163 CACHE_TIMEOUT);
2164 return true;
2165 }
2166
2167 return false;
2168 }
2169
/* HCI request completion callback for remove_uuid(): forwards the HCI
 * status to the generic class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2176
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or, for the
 * all-zero wildcard UUID, every UUID) and refresh Class of Device and
 * EIR data accordingly.
 *
 * As with add_uuid(), the mgmt response is deferred to
 * remove_uuid_complete() unless no HCI commands were needed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was (re)armed, the actual EIR/class
		 * update is deferred to the cache flush; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant because matching entries are deleted while walking */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing needed to be sent to the controller */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2255
/* HCI request completion callback for set_dev_class(): forwards the HCI
 * status to the generic class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2262
/* Handler for MGMT_OP_SET_DEV_CLASS: update the major/minor device class.
 * BR/EDR only. The reserved bits of both fields must be zero. When the
 * controller is powered the change is pushed via an HCI request and the
 * mgmt response is deferred to set_class_complete(); otherwise the new
 * values are just stored and acknowledged immediately.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* The lock is dropped around the synchronous cancel —
		 * presumably because the service_cache work itself needs
		 * the hdev lock and waiting for it while holding the lock
		 * would deadlock. NOTE(review): confirm against the work
		 * handler before reordering anything here.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: the controller already had the right values */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2333
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the controller's stored
 * BR/EDR link keys with the list supplied by userspace (typically at
 * daemon startup). All existing keys are cleared first, blocked and
 * debug keys in the supplied list are skipped, and the debug_keys flag
 * controls whether future debug keys will be kept.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap derived so that struct_size() below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type before touching any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify other mgmt sockets if the settings actually changed */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2423
/* Broadcast a MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt sockets except skip_sk (the socket that issued the unpair command,
 * which gets a command response instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2435
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the pairing data (link key
 * for BR/EDR; LTK/IRK via SMP for LE) for a remote device, optionally
 * terminating an existing connection to it.
 *
 * When a disconnection is requested and a link exists, the mgmt response
 * is deferred until the link is actually torn down; otherwise it is sent
 * immediately together with a DEVICE_UNPAIRED event.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No active link: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response deferred until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2563
/* Handler for MGMT_OP_DISCONNECT: terminate the BR/EDR or LE connection
 * to the given remote address. Only one disconnect command may be in
 * flight at a time; the mgmt response is deferred (via the pending
 * command's generic_cmd_complete) until the link actually goes down.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Reject a second disconnect while one is still pending */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Response deferred until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2629
/* Map an HCI link type plus (for LE) address type onto the mgmt-level
 * BDADDR_* address type used in mgmt events and responses.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
2648
/* Handler for MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections. SCO and eSCO links are excluded from the
 * reply, so the transmitted length is recomputed from the number of
 * entries actually written.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 count;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: size the reply for every mgmt-connected link (an
	 * upper bound, since SCO/eSCO entries are filtered out below).
	 */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries that actually get reported */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[count].bdaddr, &c->dst);
		rp->addr[count].type = link_to_bdaddr(c->type, c->dst_type);
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2702
/* Queue an HCI PIN Code Negative Reply for the given address and register
 * a pending mgmt command whose response is sent when the HCI command
 * completes (via addr_cmd_complete). On hci_send_cmd() failure the
 * pending entry is removed again and the error returned.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2723
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 *
 * If the pending security level demands a 16-digit PIN and the supplied
 * one is shorter, the pairing is rejected on the user's behalf with a
 * negative reply and the command fails with INVALID_PARAMS. Otherwise
 * the mgmt response is deferred until the HCI command completes.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * answered with a negative reply instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Response deferred until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2785
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * subsequent pairings. Values beyond SMP_IO_KEYBOARD_DISPLAY are
 * rejected; the change itself always succeeds and is acknowledged with
 * an empty response.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2808
find_pairing(struct hci_conn * conn)2809 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2810 {
2811 struct hci_dev *hdev = conn->hdev;
2812 struct mgmt_pending_cmd *cmd;
2813
2814 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2815 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2816 continue;
2817
2818 if (cmd->user_data != conn)
2819 continue;
2820
2821 return cmd;
2822 }
2823
2824 return NULL;
2825 }
2826
/* Finish a MGMT_OP_PAIR_DEVICE command: send the response, detach the
 * pairing callbacks from the connection and release the reference that
 * pair_device() took on it (cmd->user_data = hci_conn_get(conn)).
 * The callback clearing and drop/put ordering is deliberate — do not
 * reorder.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2855
/* Called by the SMP layer when pairing over this connection ends; maps
 * the outcome to a mgmt status and completes any pending PAIR_DEVICE
 * command for the connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2867
/* BR/EDR connection callback (connect/security/disconnect) installed by
 * pair_device(): completes the pending PAIR_DEVICE command with the
 * translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2883
/* LE variant of pairing_complete_cb(): a successful event is ignored
 * because for LE just connecting is no proof that pairing finished — the
 * SMP layer reports success via mgmt_smp_complete() instead. Only
 * failures complete the pending PAIR_DEVICE command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (status) {
		cmd = find_pairing(conn);
		if (!cmd) {
			BT_DBG("Unable to find a pending command");
			return;
		}

		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
2902
/* Handler for MGMT_OP_PAIR_DEVICE: establish a connection (BR/EDR ACL or
 * LE) to the remote device and initiate pairing on it.
 *
 * Fix: hci_conn_params_add() can return NULL on allocation failure, and
 * the previous code dereferenced its result unconditionally
 * (p->auto_connect), causing a NULL pointer dereference. That case now
 * fails cleanly with MGMT_STATUS_NO_RESOURCES.
 *
 * The mgmt response is normally deferred: the pending command holds a
 * reference on the connection (hci_conn_get) and is completed from the
 * pairing callbacks / mgmt_smp_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation of new parameters failed; without this
			 * check the p->auto_connect access below would be a
			 * NULL pointer dereference.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_PAIR_DEVICE,
						MGMT_STATUS_NO_RESOURCES,
						&rp, sizeof(rp));
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect_cfm_cb already in place means another pairing owns
	 * this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* An already secure connection can complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3033
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort a pairing previously
 * started with MGMT_OP_PAIR_DEVICE. The pending PAIR_DEVICE command is
 * completed with MGMT_STATUS_CANCELLED, stored pairing data for the
 * address is removed, and a link created solely for the pairing attempt
 * is torn down.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data is the connection reference taken by pair_device() */
	conn = cmd->user_data;

	/* The cancel must target the same device the pairing was for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3090
/* Common implementation for all user confirmation/passkey (negative)
 * reply commands.
 *
 * For LE connections the reply is routed through the SMP layer and
 * answered immediately; for BR/EDR the matching HCI command (hci_op) is
 * sent and the mgmt response is deferred until it completes. The passkey
 * argument is only used for HCI_OP_USER_PASSKEY_REPLY; all other callers
 * pass 0.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP, not through HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	/* BR/EDR: response deferred until the HCI command completes */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3161
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request.
 * Thin wrapper around user_pairing_resp() with no passkey.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3173
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: accept a numeric-comparison
 * confirmation request. The command payload must be exactly the reply
 * structure; anything else is rejected as invalid parameters.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3189
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a numeric-comparison
 * confirmation request. Thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3201
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by
 * the user. The only wrapper that forwards a non-zero passkey to
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3213
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request.
 * Thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3225
/* Expire the current advertising instance if it carries any of the
 * given data flags (e.g. local name or appearance embedded in the
 * advertising data) and schedule the next instance so that updated
 * values get advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	/* Stop the current instance's timer before moving on */
	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	/* Fire-and-forget: no completion callback needed here */
	hci_req_run(&req, NULL);
}
3254
/* HCI request completion callback for Set Local Name.
 *
 * Resolves the pending mgmt command with the request status. On
 * success, expires a running advertising instance that embeds the
 * local name so it is re-advertised with the new name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the pending command is already gone */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		/* Echo the requested name back in the reply */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3286
/* Set Local Name command handler.
 *
 * Stores the short name unconditionally. If the controller is not
 * powered, the complete name is stored as well and the change is
 * broadcast immediately; otherwise an HCI request updates the
 * controller name, EIR and (when advertising) the LE scan response,
 * completed by set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is stored regardless of powered state; only the
	 * complete name requires an HCI request below.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets (not the sender) of the change */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3356
/* Set Appearance command handler: store the LE appearance value and,
 * when it changed, refresh advertising data / extended info that
 * embed it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		/* Re-advertise instances that carry the appearance */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3390
/* Get PHY Configuration command handler: report supported, selected
 * and configurable PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	/* Note: struct tag typo ("confguration") comes from the UAPI header */
	struct mgmt_rp_get_phy_confguration rp = { 0 };

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3411
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3412 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3413 {
3414 struct mgmt_ev_phy_configuration_changed ev;
3415
3416 memset(&ev, 0, sizeof(ev));
3417
3418 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3419
3420 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3421 sizeof(ev), skip);
3422 }
3423
/* HCI request completion callback for Set PHY Configuration.
 *
 * Resolves the pending mgmt command with the request status and, on
 * success, broadcasts the new PHY selection to other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the pending command is already gone */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the requester; it already got the command complete */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3454
/* Set PHY Configuration command handler.
 *
 * Validates the requested PHY set against what the controller supports
 * and what is configurable, maps the BR/EDR selections directly onto
 * hdev->pkt_type and, when the LE PHY selection changed, issues an
 * HCI_OP_LE_SET_DEFAULT_PHY request completed by
 * set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	/* Note: struct tag typo ("confguration") comes from the UAPI header */
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Every non-configurable PHY must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: reply directly */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR PHY selections onto ACL packet-type bits. Note the
	 * inverted sense for the EDR (2M/3M) bits below: those bits get
	 * SET when the corresponding PHY is deselected and cleared when
	 * it is selected, opposite to the BR 1M multi-slot bits.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * issue; broadcast the change (if any) and reply directly.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3609
/* Set Blocked Keys command handler.
 *
 * Replaces the device's blocked-key list with the keys supplied in the
 * (variable-length) command. The reply status is a mgmt status code
 * carried in @err, not a negative errno.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that still fits in a u16-sized command */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Iterate using the CPU-endian key_count: keys->key_count is a
	 * raw __le16 and comparing it directly against the loop counter
	 * is wrong on big-endian systems.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
3658
/* Set Wideband Speech command handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED setting. Only available when
 * the driver sets HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and the value
 * cannot be flipped while the controller is powered.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is supported */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value while powered is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* changed is true only if the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3714
/* Read Security Information command handler.
 *
 * Builds an EIR-encoded list of security capabilities:
 *   0x01 - flags (remote public key validation, enc key size enforcement)
 *   0x02 - controller's max encryption key size (when readable)
 *   0x03 - SMP max encryption key size
 *
 * NOTE(review): buf[16] must hold sizeof(*rp) plus the appended EIR
 * entries (3 + 4 + 4 bytes of data below) - re-check if entries are
 * ever added here.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3763
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental feature UUIDs. The byte arrays are the string form
 * reversed (little-endian wire order used by the mgmt interface).
 */
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3783
/* Read Experimental Features Information command handler.
 *
 * Lists the experimental features applicable to the given index: the
 * debug feature (non-controller index only, if built in), simultaneous
 * central/peripheral support, and LL privacy. Each feature entry is a
 * 16-byte UUID plus a 32-bit flags word (20 bytes - matching the
 * "20 * idx" in the reply length and the buf[62] sizing: 2-byte
 * header + 3 * 20).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0): enabled, BIT(1): toggling changes settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3842
/* Notify subscribed mgmt sockets (except @skip) that the LL privacy
 * experimental feature changed state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? (BIT(0) | BIT(1)) : BIT(1);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3857
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify subscribed mgmt sockets (except @skip) that the debug
 * experimental feature changed state.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3872
/* Set Experimental Feature command handler.
 *
 * Dispatches on the UUID in the request:
 *  - the all-zero UUID disables every experimental feature,
 *  - debug_uuid toggles the debug feature (non-controller index only),
 *  - rpa_resolution_uuid toggles LL privacy (controller index only,
 *    powered off only).
 * Any other UUID is rejected as not supported.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero UUID: turn all experimental features off */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature toggle */
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	/* LL privacy toggle */
	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4023
4024 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4025
/* Get Device Flags command handler.
 *
 * Looks up the per-device flags for a BR/EDR whitelist entry or an LE
 * connection parameter entry and returns the supported and current
 * flag bits. Unknown devices get MGMT_STATUS_INVALID_PARAMS.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing "\n": bt_dev_dbg terminates the line itself, as in
	 * all the other debug statements in this file.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
4075
/* Broadcast DEVICE_FLAGS_CHANGED to all mgmt sockets except @sk. */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4089
/* Set Device Flags command handler.
 *
 * Stores the requested per-device flags for a BR/EDR whitelist entry
 * or an LE connection parameter entry, after rejecting unsupported
 * flag bits, and broadcasts the change on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already CPU-endian here; converting it a second
	 * time would corrupt the printed value on big-endian systems.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* Reject any flag bits outside the supported set. This runs before
	 * the lock is taken, so the bail-out must not pass through the
	 * unlock - jumping to a label that unlocks an unheld hdev lock
	 * would corrupt the lock state.
	 */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4147
/* Send the Advertisement Monitor Added event to all mgmt sockets
 * except the requester @sk.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4157
/* Send the Advertisement Monitor Removed event to all mgmt sockets
 * except the requester @sk. Use the removed-event structure rather
 * than mgmt_ev_adv_monitor_added - same layout, but the wrong type
 * here is misleading and fragile if the structs ever diverge.
 */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
4167
/* Read Advertisement Monitor Features command handler.
 *
 * Reports the supported monitor features together with the handles of
 * all currently registered monitors.
 *
 * NOTE(review): handles[] holds at most HCI_MAX_ADV_MONITOR_NUM_HANDLES
 * entries but the IDR walk below is unbounded - this relies on the
 * monitor registration path never exceeding that limit; confirm
 * hci_add_adv_monitor() enforces it.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Collect the handles of all registered monitors */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	/* Reply is a fixed header plus one u16 per handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4216
/* Add Advertisement Patterns Monitor command handler.
 *
 * Validates the variable-length pattern list, builds an adv_monitor
 * containing a copy of each pattern and registers it with the core.
 * On failure the partially built monitor (and its pattern list) is
 * released via hci_free_adv_monitor().
 *
 * NOTE(review): the length check below only requires
 * len > sizeof(*cp); it does not verify that len covers
 * pattern_count full pattern entries, so cp->patterns[i] may read
 * beyond the received data for a malformed request - confirm the
 * mgmt framing guarantees this cannot happen.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
	__u8 cp_ofst = 0, cp_len = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		/* Enforce the per-monitor pattern limit */
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		/* Offset + length must stay within the AD data area */
		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	/* NOTE(review): mp_cnt is incremented once per loop iteration, so
	 * it always equals cp->pattern_count here - this check looks
	 * redundant; confirm before removing.
	 */
	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	/* Notify other sockets only if a monitor was actually added */
	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
		mgmt_adv_monitor_added(sk, hdev, m->handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	hci_free_adv_monitor(m);
	return err;
}
4318
/* MGMT_OP_REMOVE_ADV_MONITOR handler: removes the advertisement monitor
 * identified by the caller-supplied handle and echoes the handle back on
 * success.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int prev_adv_monitors_cnt;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);
	/* Snapshot the count so the "removed" event is only emitted when a
	 * monitor was actually dropped.
	 */
	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}
	/* NOTE(review): errors other than -ENOENT fall through and are
	 * reported as success — presumably hci_remove_adv_monitor() only
	 * returns 0 or -ENOENT; confirm against its implementation.
	 */

	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4356
/* HCI request completion callback for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller's Read Local OOB (Extended) Data response
 * into a mgmt reply for the pending command.  For the legacy (non-
 * extended) HCI command only P-192 hash/randomizer values exist, so the
 * reply is truncated to exclude the trailing P-256 fields.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if userspace is no longer waiting. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short/malformed controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy command: shrink the reply so the (zeroed) P-256
		 * fields at the end of the struct are not sent.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short/malformed controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4415
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queues the matching HCI read
 * (the extended variant when BR/EDR Secure Connections is enabled) and
 * defers the reply to read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists for SSP-capable controllers. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be in flight. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4466
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two request sizes are accepted:
 * the legacy form carrying only the P-192 hash/randomizer, and the
 * extended form carrying both P-192 and P-256 values.  Any other length
 * is rejected as invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is BR/EDR-only. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4574
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drops stored OOB data for one
 * remote device, or for all of them when BDADDR_ANY is supplied.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only BR/EDR addresses are accepted here. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address: clear the whole OOB data store. */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	} else {
		err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
						 cp->addr.type);
		status = err < 0 ? MGMT_STATUS_INVALID_PARAMS :
				   MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4611
/* Called when the Start Discovery HCI sequence finishes: completes
 * whichever start-discovery variant is pending and wakes the suspend
 * machinery if it was waiting for discovery to resume.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 start_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* At most one of the start-discovery variants can be pending. */
	for (i = 0; i < ARRAY_SIZE(start_ops) && !cmd; i++)
		cmd = pending_find(start_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4641
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)4642 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4643 uint8_t *mgmt_status)
4644 {
4645 switch (type) {
4646 case DISCOV_TYPE_LE:
4647 *mgmt_status = mgmt_le_support(hdev);
4648 if (*mgmt_status)
4649 return false;
4650 break;
4651 case DISCOV_TYPE_INTERLEAVED:
4652 *mgmt_status = mgmt_le_support(hdev);
4653 if (*mgmt_status)
4654 return false;
4655 fallthrough;
4656 case DISCOV_TYPE_BREDR:
4657 *mgmt_status = mgmt_bredr_support(hdev);
4658 if (*mgmt_status)
4659 return false;
4660 break;
4661 default:
4662 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4663 return false;
4664 }
4665
4666 return true;
4667 }
4668
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which command the reply
 * is attributed to).  Validates preconditions, records the discovery
 * parameters and kicks the request workqueue; the final reply is sent
 * from mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse if discovery is already running or periodic inquiry is
	 * active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual HCI traffic runs from the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4736
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared start logic. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4743
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared start
 * logic with the limited-discovery opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4751
/* cmd_complete callback for Start Service Discovery: replies with only
 * the first byte of the stored command parameters (presumably the
 * discovery type — confirm against struct
 * mgmt_cp_start_service_discovery's layout).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4758
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but
 * with result filtering by RSSI threshold and an optional UUID list
 * appended to the fixed-size command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap that keeps sizeof(*cp) + uuid_count * 16 within u16,
	 * so the expected_len computation below cannot wrap.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse if discovery is already running or periodic inquiry is
	 * active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must carry exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter; it is freed by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* The actual HCI traffic runs from the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4859
/* Called when the Stop Discovery HCI sequence finishes: completes the
 * pending mgmt command and wakes the suspend machinery if it was
 * waiting for discovery to be paused.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4882
/* MGMT_OP_STOP_DISCOVERY handler: requests termination of an active
 * discovery session; the reply is deferred to
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is in progress. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &cp->type,
					sizeof(cp->type));
		goto unlock;
	}

	/* The caller must name the same discovery type that was started. */
	if (hdev->discovery.type != cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->type, sizeof(cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Defer the actual HCI work to the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4924
/* MGMT_OP_CONFIRM_NAME handler: tells the kernel whether the name of a
 * discovered device is already known, so the inquiry cache can decide
 * whether a remote-name request is still needed.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	if (cp->name_known) {
		/* Name already resolved: drop the entry from the
		 * name-resolution list.
		 */
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	} else {
		/* Queue the entry for remote-name resolution. */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4966
/* MGMT_OP_BLOCK_DEVICE handler: adds the given address to the adapter's
 * reject list and broadcasts a DEVICE_BLOCKED event to other sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		/* Inform every other mgmt socket about the change; the
		 * issuing socket is skipped.
		 */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5002
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the given address from the
 * adapter's reject list and broadcasts a DEVICE_UNBLOCKED event.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Address was not on the list: treat as a caller error. */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		/* Inform every other mgmt socket about the change; the
		 * issuing socket is skipped.
		 */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5038
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID record and
 * refreshes the extended inquiry response so it is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Source values above 0x0002 are not defined. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the new Device ID record into the EIR data. */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5073
/* HCI request completion callback used when re-arming an advertising
 * instance from set_advertising_complete(); only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5079
/* HCI completion callback for the request built in set_advertising().
 * Syncs the HCI_ADVERTISING flag with controller state, answers all
 * pending SET_ADVERTISING commands and, when software advertising was
 * just switched off, re-arms any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured
		 * one.
		 */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5153
/* MGMT_OP_SET_ADVERTISING handler.  val 0x00 disables software
 * advertising, 0x01 enables it, 0x02 enables it as connectable.
 *
 * When no HCI traffic is needed (adapter powered off, no effective
 * state change, active LE connections, or an active LE scan) the flags
 * are toggled directly and a settings response is sent; otherwise an
 * HCI request is queued and the reply comes from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused for suspend; don't touch it now. */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast new settings if the flags actually
		 * changed.
		 */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5272
/* MGMT_OP_SET_STATIC_ADDRESS handler: stores the LE static random
 * address for the adapter.  Only allowed when the adapter is LE capable
 * and not currently powered.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else must be a
	 * well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5316
/* MGMT_OP_SET_SCAN_PARAMS handler: stores the LE scan interval/window
 * and restarts passive background scanning so the new values take
 * effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* 0x0004..0x4000 is the accepted range — presumably mirroring the
	 * HCI LE Set Scan Parameters limits; confirm against the Core
	 * spec.
	 */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5373
/* HCI completion callback for Set Fast Connectable: commits the flag
 * and notifies settings listeners on success, or reports the failure
 * status to the waiting socket.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Commit the requested mode now that the controller
		 * accepted it.
		 */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5407
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR to be enabled
 * and a controller of at least Bluetooth 1.2. When powered off, only
 * the HCI_FAST_CONNECTABLE flag is toggled; when powered on, the
 * controller is reconfigured via __hci_req_write_fast_connectable()
 * and the result is delivered asynchronously through
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are acceptable. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_FAST_CONNECTABLE may be in flight per device. */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested value matches current state: just echo the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: flip the flag without touching the controller. */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		/* Request never started: report failure synchronously and
		 * drop the pending command that would otherwise leak.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5472
/* HCI request completion callback for MGMT_OP_SET_BREDR. set_bredr()
 * sets HCI_BREDR_ENABLED optimistically before running the request, so
 * on failure the flag must be reverted here; on success the settings
 * are confirmed to the requester and broadcast to all mgmt sockets.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5504
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected; re-enabling is rejected when a static address or secure
 * connections would make the configuration invalid (see comment below).
 * When powered, the change completes asynchronously via
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; a dual-mode controller cannot be turned
	 * into BR/EDR-only through this command.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo back the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5616
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 * Translates the requested mode (0x00 off, 0x01 enabled, 0x02 SC-only)
 * into the HCI_SC_ENABLED/HCI_SC_ONLY flag pair once the controller
 * has accepted the Write Secure Connections Host Support command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5661
/* MGMT_OP_SET_SECURE_CONN handler: 0x00 disables secure connections,
 * 0x01 enables them, 0x02 enables SC-only mode. If the controller is
 * powered and SC-capable with BR/EDR enabled, the change goes through
 * an HCI request and completes in sc_enable_complete(); otherwise only
 * the host-side flags are updated.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC requires either controller support or LE (host-side SC). */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR up on an SC-capable controller, SSP must be on. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flags-only path: controller does not need (or cannot take)
	 * the Write SC Support command right now.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just echo the settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Note: "failed" is also the common exit label on success; the
	 * goto directly above it is redundant but harmless.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5749
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discards debug keys, 0x01 keeps
 * them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally enables their use
 * (HCI_USE_DEBUG_KEYS). When powered with SSP enabled and the "use"
 * state changed, the controller's SSP debug mode is switched to match.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Fire-and-forget: the result of the mode switch is not waited on. */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast when the "keep" setting actually changed. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5796
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy. 0x00 disables
 * privacy and wipes the stored IRK, 0x01 enables it with the supplied
 * IRK, 0x02 enables "limited" privacy (HCI_LIMITED_PRIVACY). Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK on a live controller is not supported. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5853
irk_is_valid(struct mgmt_irk_info * irk)5854 static bool irk_is_valid(struct mgmt_irk_info *irk)
5855 {
5856 switch (irk->addr.type) {
5857 case BDADDR_LE_PUBLIC:
5858 return true;
5859
5860 case BDADDR_LE_RANDOM:
5861 /* Two most significant bits shall be set */
5862 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5863 return false;
5864 return true;
5865 }
5866
5867 return false;
5868 }
5869
/* MGMT_OP_LOAD_IRKS handler: replace the device's entire IRK store
 * with the list supplied by user space. The whole list is validated
 * up front so the operation is all-or-nothing; blocked keys are then
 * skipped individually during the load.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate everything before touching the existing key store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): irk_is_valid() above only accepts LE address
		 * types, so this BDADDR_BREDR branch appears unreachable via
		 * this command — confirm whether the validator should accept
		 * BDADDR_BREDR as well.
		 */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handed us IRKs, so it can resolve RPAs itself. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5945
ltk_is_valid(struct mgmt_ltk_info * key)5946 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5947 {
5948 if (key->initiator != 0x00 && key->initiator != 0x01)
5949 return false;
5950
5951 switch (key->addr.type) {
5952 case BDADDR_LE_PUBLIC:
5953 return true;
5954
5955 case BDADDR_LE_RANDOM:
5956 /* Two most significant bits shall be set */
5957 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5958 return false;
5959 return true;
5960 }
5961
5962 return false;
5963 }
5964
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the device's entire LTK
 * store with the supplied list. The list is fully validated before the
 * existing keys are cleared; blocked keys and P-256 debug keys are
 * skipped during the load.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate everything before touching the existing key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type + authentication
		 * level. P-256 debug keys (and unknown types) fall through
		 * to default and are deliberately not stored.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		/* NOTE(review): ltk_is_valid() above only accepts LE address
		 * types, so this BDADDR_BREDR branch appears unreachable via
		 * this command — confirm whether the validator should accept
		 * BDADDR_BREDR as well.
		 */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6065
/* Completion helper for MGMT_OP_GET_CONN_INFO pending commands: build
 * the reply from the connection's cached RSSI/TX power values (or the
 * "invalid" markers on failure) and release the connection references
 * taken when the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info the caller sent. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_conn_info(). */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6092
/* HCI request completion callback for the Read RSSI / Read TX Power
 * request issued by get_conn_info(). Recovers the connection handle
 * from the last sent command, finds the matching pending command and
 * completes it via conn_info_cmd_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matched the last sent one: nothing to complete. */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to. */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6146
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an
 * existing connection. Values cached in the hci_conn are used if they
 * are fresh enough; otherwise the controller is queried and the reply
 * is delivered asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply (including errors) echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one GET_CONN_INFO per connection may be in flight. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped again in conn_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6267
/* Completion helper for MGMT_OP_GET_CLOCK_INFO pending commands: fill
 * in local clock plus, when a connection was involved, piconet clock
 * and accuracy; then release the connection references taken in
 * get_clock_info(). On error the clock fields stay zeroed.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param starts with the mgmt_addr_info the caller sent. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_clock_info(). */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6303
/* HCI request completion callback for the Read Clock request issued by
 * get_clock_info(). The "which" field of the last sent command tells
 * whether a piconet (per-connection) clock was read; the matching
 * pending command is then completed via clock_info_cmd_complete().
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read. */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Pending command is keyed by the (possibly NULL) connection. */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6335
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, when a
 * non-ANY BR/EDR address is given and connected, the piconet clock of
 * that connection. Always asynchronous — the reply is sent from
 * clock_info_cmd_complete() once the HCI Read Clock request finishes.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply (including errors) echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock; otherwise the named
	 * device must be connected.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: which = 0 means the local clock. */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped again in clock_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6411
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6412 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6413 {
6414 struct hci_conn *conn;
6415
6416 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6417 if (!conn)
6418 return false;
6419
6420 if (conn->dst_type != type)
6421 return false;
6422
6423 if (conn->state != BT_CONNECTED)
6424 return false;
6425
6426 return true;
6427 }
6428
6429 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock */
/* Create (or fetch) the connection parameters entry for addr/addr_type
 * and set its auto-connect policy, moving the entry onto the matching
 * action list (pend_le_conns or pend_le_reports) as needed.
 * Returns 0 on success, -EIO if the params entry cannot be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from whichever action list the entry is currently on
	 * before re-queueing it for the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected. */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6473
/* Broadcast a Device Added mgmt event to every mgmt socket except the
 * originator (sk), announcing the address, its type and the configured
 * auto-connect action.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6485
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * For BR/EDR addresses the device is added to the whitelist (only the
 * "allow incoming connection" action 0x01 is supported). For LE
 * addresses the command maps the requested action to an auto-connect
 * policy and stores it in the connection parameters:
 *   0x00 -> HCI_AUTO_CONN_REPORT (background scan, report only)
 *   0x01 -> HCI_AUTO_CONN_DIRECT
 *   0x02 -> HCI_AUTO_CONN_ALWAYS
 *
 * Replies with a Command Complete carrying the address, and emits
 * Device Added plus Device Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject unknown address types and the BDADDR_ANY wildcard */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the whitelist changed */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the current flags so the Device Flags Changed
		 * event below reports accurate state.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6583
/* Emit the MGMT Device Removed event to all mgmt sockets except sk. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6594
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address: removes the device from the whitelist
 * (BR/EDR) or deletes its LE connection parameters. With BDADDR_ANY
 * (and address type 0): clears the whole whitelist and all LE
 * connection parameters, preserving entries that are disabled or in
 * explicit-connect state as the per-case checks below dictate.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan after the whitelist change */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were never added via Add Device,
		 * so refuse to remove them here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept
			 * but downgraded so the attempt can finish.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6723
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Individual entries
 * with an invalid address type or out-of-range values are skipped
 * rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Bound the count so expected_len below cannot overflow u16 */
	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop previously stored, disabled parameters first */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6808
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: set or clear the
 * HCI_EXT_CONFIGURED flag. Only allowed while the controller is
 * powered off and only when it declares HCI_QUIRK_EXTERNAL_CONFIG.
 * If the change flips the overall configured state, the controller
 * index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean; anything else is invalid */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually toggled */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag no longer matches the actual
	 * configuration state, move the index between the configured
	 * and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run through the power-on
			 * sequence so it shows up as a usable controller.
			 */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6864
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store the public address to
 * be programmed into the controller via the set_bdaddr driver hook.
 * Only accepted while the controller is powered off.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool addr_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver set_bdaddr hook the address cannot be
	 * programmed into the controller.
	 */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	addr_changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0 || !addr_changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: leave the unconfigured
		 * state and kick off the power-on sequence.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6916
/* Completion callback for the HCI request issued by
 * read_local_ssp_oob_req(). Builds the Read Local OOB Extended Data
 * reply from the controller's P-192 and/or P-256 hash/randomizer
 * values and completes the pending mgmt command.
 *
 * EIR sizes: each appended field is 2 bytes of header plus the data,
 * so class-of-device costs 5 bytes and each 16-byte hash/rand costs 18.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller reported an error: reply with empty EIR */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 values are available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode must not expose P-192 */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure skip EIR construction; eir_len is already 0 */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the fresh OOB data to interested mgmt sockets,
	 * excluding the requester who already got it in the reply.
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7027
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7028 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7029 struct mgmt_cp_read_local_oob_ext_data *cp)
7030 {
7031 struct mgmt_pending_cmd *cmd;
7032 struct hci_request req;
7033 int err;
7034
7035 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7036 cp, sizeof(*cp));
7037 if (!cmd)
7038 return -ENOMEM;
7039
7040 hci_req_init(&req, hdev);
7041
7042 if (bredr_sc_enabled(hdev))
7043 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7044 else
7045 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7046
7047 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7048 if (err < 0) {
7049 mgmt_pending_remove(cmd);
7050 return err;
7051 }
7052
7053 return 0;
7054 }
7055
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * For BR/EDR (with SSP enabled) the OOB values have to be read from
 * the controller, so the reply is deferred to
 * read_local_oob_ext_data_complete(); for LE the EIR payload (address,
 * role, optional SC confirm/random, flags) is built inline here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and worst-case EIR length so the
	 * reply buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB values must be read from the controller;
			 * the reply is sent from the request callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 = random/static,
		 * 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other OOB-interested sockets, excluding the requester */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7211
get_supported_adv_flags(struct hci_dev * hdev)7212 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7213 {
7214 u32 flags = 0;
7215
7216 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7217 flags |= MGMT_ADV_FLAG_DISCOV;
7218 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7219 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7220 flags |= MGMT_ADV_FLAG_APPEARANCE;
7221 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7222
7223 /* In extended adv TX_POWER returned from Set Adv Param
7224 * will be always valid.
7225 */
7226 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7227 ext_adv_capable(hdev))
7228 flags |= MGMT_ADV_FLAG_TX_POWER;
7229
7230 if (ext_adv_capable(hdev)) {
7231 flags |= MGMT_ADV_FLAG_SEC_1M;
7232 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7233 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7234
7235 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7236 flags |= MGMT_ADV_FLAG_SEC_2M;
7237
7238 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7239 flags |= MGMT_ADV_FLAG_SEC_CODED;
7240 }
7241
7242 return flags;
7243 }
7244
/* Handler for MGMT_OP_READ_ADV_FEATURES: report the supported
 * advertising flags, data-length limits and the list of currently
 * registered advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Reply with the opcode of this command, not MGMT_OP_SET_ADVERTISING:
	 * mgmt status replies must echo the command they answer.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One byte per registered instance for the trailing instance list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7300
calculate_name_len(struct hci_dev * hdev)7301 static u8 calculate_name_len(struct hci_dev *hdev)
7302 {
7303 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7304
7305 return append_local_name(hdev, buf, 0);
7306 }
7307
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7308 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7309 bool is_adv_data)
7310 {
7311 u8 max_len = HCI_MAX_AD_LENGTH;
7312
7313 if (is_adv_data) {
7314 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7315 MGMT_ADV_FLAG_LIMITED_DISCOV |
7316 MGMT_ADV_FLAG_MANAGED_FLAGS))
7317 max_len -= 3;
7318
7319 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7320 max_len -= 3;
7321 } else {
7322 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7323 max_len -= calculate_name_len(hdev);
7324
7325 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7326 max_len -= 4;
7327 }
7328
7329 return max_len;
7330 }
7331
flags_managed(u32 adv_flags)7332 static bool flags_managed(u32 adv_flags)
7333 {
7334 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7335 MGMT_ADV_FLAG_LIMITED_DISCOV |
7336 MGMT_ADV_FLAG_MANAGED_FLAGS);
7337 }
7338
tx_power_managed(u32 adv_flags)7339 static bool tx_power_managed(u32 adv_flags)
7340 {
7341 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7342 }
7343
name_managed(u32 adv_flags)7344 static bool name_managed(u32 adv_flags)
7345 {
7346 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7347 }
7348
appearance_managed(u32 adv_flags)7349 static bool appearance_managed(u32 adv_flags)
7350 {
7351 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7352 }
7353
/* Validate user-supplied advertising / scan-response TLV data: it must
 * fit within the flag-adjusted maximum, be well-formed length-type-value
 * fields, and must not contain fields the kernel manages itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. This must be checked before
		 * reading the type byte at data[i + 1], otherwise a field
		 * whose length byte sits on the last valid index causes an
		 * out-of-bounds read.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7398
/* Request-completion callback for add_advertising().
 *
 * On success the pending instances are committed; on failure every
 * still-pending instance is removed again (cancelling the adv timeout
 * if the failed instance was the one currently being advertised) and
 * Advertising Removed events are emitted. Finally the pending mgmt
 * command, if any, is completed.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Commit the instance */
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer if the failed instance is the
		 * one currently on air.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7450
/* Handler for MGMT_OP_ADD_ADVERTISING: register (or replace) an
 * advertising instance with the supplied flags, adv data and scan
 * response data, then schedule it for transmission if possible.
 * When HCI traffic is required the reply is deferred to
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Instance numbers are 1-based, bounded by controller capability */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable part must be exactly adv_data_len + scan_rsp_len */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7603
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING. */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Even when disabling advertising failed, the instance itself has
	 * already been removed, so always answer the pending command with
	 * success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		cp = cmd->param;
		rp.instance = cp->instance;

		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
7633
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (or all instances when cp->instance is 0) and, when no instances are
 * left, disable advertising altogether.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Note: report the status against MGMT_OP_REMOVE_ADVERTISING (the
	 * command actually being handled), not MGMT_OP_SET_ADVERTISING.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse to race with any other add/remove/SET_LE operation. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7719
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response data lengths available for a given instance and flags.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags; reject anything outside that subset.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
7758
/* Dispatch table for mgmt commands, indexed by mgmt opcode. Each entry
 * gives the handler, its expected parameter size, and optional HCI_MGMT_*
 * flags: HCI_MGMT_VAR_LEN (size is a minimum), HCI_MGMT_NO_HDEV /
 * HCI_MGMT_HDEV_OPTIONAL (controller index handling), HCI_MGMT_UNTRUSTED
 * (allowed on untrusted sockets) and HCI_MGMT_UNCONFIGURED (allowed on
 * unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7879
mgmt_index_added(struct hci_dev * hdev)7880 void mgmt_index_added(struct hci_dev *hdev)
7881 {
7882 struct mgmt_ev_ext_index ev;
7883
7884 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7885 return;
7886
7887 switch (hdev->dev_type) {
7888 case HCI_PRIMARY:
7889 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7890 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7891 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7892 ev.type = 0x01;
7893 } else {
7894 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7895 HCI_MGMT_INDEX_EVENTS);
7896 ev.type = 0x00;
7897 }
7898 break;
7899 case HCI_AMP:
7900 ev.type = 0x02;
7901 break;
7902 default:
7903 return;
7904 }
7905
7906 ev.bus = hdev->bus;
7907
7908 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7909 HCI_MGMT_EXT_INDEX_EVENTS);
7910 }
7911
mgmt_index_removed(struct hci_dev * hdev)7912 void mgmt_index_removed(struct hci_dev *hdev)
7913 {
7914 struct mgmt_ev_ext_index ev;
7915 u8 status = MGMT_STATUS_INVALID_INDEX;
7916
7917 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7918 return;
7919
7920 switch (hdev->dev_type) {
7921 case HCI_PRIMARY:
7922 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7923
7924 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7925 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7926 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7927 ev.type = 0x01;
7928 } else {
7929 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7930 HCI_MGMT_INDEX_EVENTS);
7931 ev.type = 0x00;
7932 }
7933 break;
7934 case HCI_AMP:
7935 ev.type = 0x02;
7936 break;
7937 default:
7938 return;
7939 }
7940
7941 ev.bus = hdev->bus;
7942
7943 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7944 HCI_MGMT_EXT_INDEX_EVENTS);
7945 }
7946
7947 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)7948 static void restart_le_actions(struct hci_dev *hdev)
7949 {
7950 struct hci_conn_params *p;
7951
7952 list_for_each_entry(p, &hdev->le_conn_params, list) {
7953 /* Needed for AUTO_OFF case where might not "really"
7954 * have been powered off.
7955 */
7956 list_del_init(&p->action);
7957
7958 switch (p->auto_connect) {
7959 case HCI_AUTO_CONN_DIRECT:
7960 case HCI_AUTO_CONN_ALWAYS:
7961 list_add(&p->action, &hdev->pend_le_conns);
7962 break;
7963 case HCI_AUTO_CONN_REPORT:
7964 list_add(&p->action, &hdev->pend_le_reports);
7965 break;
7966 default:
7967 break;
7968 }
7969 }
7970 }
7971
mgmt_power_on(struct hci_dev * hdev,int err)7972 void mgmt_power_on(struct hci_dev *hdev, int err)
7973 {
7974 struct cmd_lookup match = { NULL, hdev };
7975
7976 bt_dev_dbg(hdev, "err %d", err);
7977
7978 hci_dev_lock(hdev);
7979
7980 if (!err) {
7981 restart_le_actions(hdev);
7982 hci_update_background_scan(hdev);
7983 }
7984
7985 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7986
7987 new_settings(hdev, match.sk);
7988
7989 if (match.sk)
7990 sock_put(match.sk);
7991
7992 hci_dev_unlock(hdev);
7993 }
7994
__mgmt_power_off(struct hci_dev * hdev)7995 void __mgmt_power_off(struct hci_dev *hdev)
7996 {
7997 struct cmd_lookup match = { NULL, hdev };
7998 u8 status, zero_cod[] = { 0, 0, 0 };
7999
8000 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8001
8002 /* If the power off is because of hdev unregistration let
8003 * use the appropriate INVALID_INDEX status. Otherwise use
8004 * NOT_POWERED. We cover both scenarios here since later in
8005 * mgmt_index_removed() any hci_conn callbacks will have already
8006 * been triggered, potentially causing misleading DISCONNECTED
8007 * status responses.
8008 */
8009 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8010 status = MGMT_STATUS_INVALID_INDEX;
8011 else
8012 status = MGMT_STATUS_NOT_POWERED;
8013
8014 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8015
8016 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8017 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8018 zero_cod, sizeof(zero_cod),
8019 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8020 ext_info_changed(hdev, NULL);
8021 }
8022
8023 new_settings(hdev, match.sk);
8024
8025 if (match.sk)
8026 sock_put(match.sk);
8027 }
8028
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8029 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8030 {
8031 struct mgmt_pending_cmd *cmd;
8032 u8 status;
8033
8034 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8035 if (!cmd)
8036 return;
8037
8038 if (err == -ERFKILL)
8039 status = MGMT_STATUS_RFKILLED;
8040 else
8041 status = MGMT_STATUS_FAILED;
8042
8043 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8044
8045 mgmt_pending_remove(cmd);
8046 }
8047
/* Emit a New Link Key event for a freshly created BR/EDR link key;
 * store_hint tells userspace whether to persist it.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = {};

	ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);

	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8064
mgmt_ltk_type(struct smp_ltk * ltk)8065 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8066 {
8067 switch (ltk->type) {
8068 case SMP_LTK:
8069 case SMP_LTK_RESPONDER:
8070 if (ltk->authenticated)
8071 return MGMT_LTK_AUTHENTICATED;
8072 return MGMT_LTK_UNAUTHENTICATED;
8073 case SMP_LTK_P256:
8074 if (ltk->authenticated)
8075 return MGMT_LTK_P256_AUTH;
8076 return MGMT_LTK_P256_UNAUTH;
8077 case SMP_LTK_P256_DEBUG:
8078 return MGMT_LTK_P256_DEBUG;
8079 }
8080
8081 return MGMT_LTK_UNAUTHENTICATED;
8082 }
8083
/* Emit a New Long Term Key event so userspace can decide whether to
 * persist the key. store_hint is suppressed for non-identity random
 * addresses since those keys cannot be looked up again.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key created while acting as initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8126
/* Emit a New Identity Resolving Key event, including the RPA that the
 * IRK resolves to.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev = {};

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);

	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8142
/* Emit a New Signature Resolving Key event. The store hint is cleared
 * for non-identity random addresses, mirroring mgmt_new_ltk().
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev = {};

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8172
/* Emit a New Connection Parameter event for an identity address. Events
 * for non-identity (resolvable/non-resolvable random) addresses are
 * suppressed since they cannot be stored meaningfully.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);

	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8193
/* Emit a Device Connected event, carrying either the connection's LE
 * advertising data or a locally built EIR blob (name + class of device)
 * for BR/EDR links.
 *
 * NOTE(review): the event is assembled in a fixed 512-byte stack buffer;
 * this assumes callers bound le_adv_data_len and name_len so that
 * sizeof(*ev) + eir never exceeds it — TODO confirm against callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* LE: forward the advertising data as-is. */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: build EIR from name and class of device. */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8230
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)8231 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8232 {
8233 struct sock **sk = data;
8234
8235 cmd->cmd_complete(cmd, 0);
8236
8237 *sk = cmd->sk;
8238 sock_hold(*sk);
8239
8240 mgmt_pending_remove(cmd);
8241 }
8242
/* Complete a pending Unpair Device command: first notify other sockets
 * via the Device Unpaired event, then answer the originating socket.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8253
mgmt_powering_down(struct hci_dev * hdev)8254 bool mgmt_powering_down(struct hci_dev *hdev)
8255 {
8256 struct mgmt_pending_cmd *cmd;
8257 struct mgmt_mode *cp;
8258
8259 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8260 if (!cmd)
8261 return false;
8262
8263 cp = cmd->param;
8264 if (!cp->val)
8265 return true;
8266
8267 return false;
8268 }
8269
/* Report a device disconnection to mgmt userspace. Also kicks the
 * deferred power-off once the last connection of a powering-down
 * controller drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only emit an event if the connection was visible to mgmt. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes any pending Disconnect command and captures its
	 * socket so the broadcast below skips it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8309
/* Fail a pending Disconnect command whose target matches the given
 * address; pending Unpair Device commands are completed regardless.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different address or address type. */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8335
/* Emit a Connect Failed event; also kicks the deferred power-off when
 * the last connection of a powering-down controller goes away.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8355
/* Ask userspace for a PIN code; secure indicates a 16-digit PIN is
 * required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
8366
/* Complete a pending PIN Code Reply command with the translated HCI
 * status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8379
/* Complete a pending PIN Code Negative Reply command with the translated
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8392
/* Ask userspace to confirm a numeric-comparison value during pairing;
 * confirm_hint distinguishes display-only from yes/no confirmation.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8409
/* Ask userspace to enter a passkey during pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8423
/* Common completion path for user confirm/passkey (neg) replies: finish
 * the pending command identified by opcode with the translated status.
 * Returns -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8439
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8446
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8454
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8461
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8469
/* Emit a Passkey Notify event so userspace can display the passkey;
 * entered counts the digits typed so far on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8485
/* Report an authentication failure for a connection and complete any
 * pending pairing command for it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command; it receives
	 * the command response instead of the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8506
/* Handle completion of an HCI authentication-enable change.
 *
 * On failure all pending SET_LINK_SECURITY commands are answered with
 * the translated error. On success the HCI_LINK_SECURITY flag is synced
 * with the controller's HCI_AUTH state and New Settings is emitted if
 * the mgmt-visible setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state; the test-and-set/clear result
	 * tells us whether the flag really changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);
}
8533
clear_eir(struct hci_request * req)8534 static void clear_eir(struct hci_request *req)
8535 {
8536 struct hci_dev *hdev = req->hdev;
8537 struct hci_cp_write_eir cp;
8538
8539 if (!lmp_ext_inq_capable(hdev))
8540 return;
8541
8542 memset(hdev->eir, 0, sizeof(hdev->eir));
8543
8544 memset(&cp, 0, sizeof(cp));
8545
8546 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8547 }
8548
/* Handle completion of an HCI Write Simple Pairing Mode command.
 *
 * Syncs the HCI_SSP_ENABLED flag (and the dependent HCI_HS_ENABLED
 * flag) with the outcome, answers pending SET_SSP commands, emits New
 * Settings when something really changed and finally updates or clears
 * the EIR data to reflect the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enable failed: roll back the flag (apparently set in
		 * anticipation) and tell listeners settings reverted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; report a
		 * change if either flag was previously set.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8601
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8602 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8603 {
8604 struct cmd_lookup *match = data;
8605
8606 if (match->sk == NULL) {
8607 match->sk = cmd->sk;
8608 sock_hold(match->sk);
8609 }
8610 }
8611
/* Handle completion of a Class of Device update.
 *
 * Collects the socket of the first pending class-affecting command (a
 * reference is taken by sk_lookup and dropped below) and, on success,
 * broadcasts the new 3-byte class followed by an extended-info update.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8630
/* Handle completion of a local name change on the controller.
 *
 * When no SET_LOCAL_NAME command is pending the change did not come
 * from mgmt, so the stored device name is updated here; if it happened
 * as part of powering on, no mgmt signals are emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket; it gets the command reply instead */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8658
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])8659 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8660 {
8661 int i;
8662
8663 for (i = 0; i < uuid_count; i++) {
8664 if (!memcmp(uuid, uuids[i], 16))
8665 return true;
8666 }
8667
8668 return false;
8669 }
8670
/* Scan a block of EIR/advertising data for any UUID contained in the
 * @uuids filter list. 16- and 32-bit UUIDs found in the data are first
 * expanded to 128-bit form on top of the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* covers type byte + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field instead of reading past
		 * the end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* UUIDs are little-endian in EIR */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* UUIDs are little-endian in EIR */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8725
/* Schedule a restart of an ongoing LE scan (used to refresh results
 * when the controller's duplicate filter hides RSSI updates).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan would finish before the
	 * restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8740
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to one discovery result. Returns false when the result should
 * be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8785
/* Report a discovered remote device to userspace as a Device Found
 * event.
 *
 * Applies the active discovery filters (RSSI threshold, UUID list,
 * limited-discoverable) before building the event. The event's eir
 * buffer carries the EIR/advertising data, optionally an appended
 * Class of Device field, followed by any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only when the copied EIR doesn't
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8870
/* Report a resolved remote name to userspace. The name is delivered as
 * a Device Found event whose eir data carries a single Complete Local
 * Name field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Room for the event header plus one EIR field (len + type) */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	memset(buf, 0, sizeof(buf));
	ev = (struct mgmt_ev_device_found *)buf;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* Encode the name as the sole EIR field of the event */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8893
/* Broadcast a Discovering event announcing that discovery of the
 * current type started (discovering != 0) or stopped.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8906
/* Broadcast a Controller Suspend event carrying the suspend state */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
8914
/* Broadcast a Controller Resume event with the wake reason and, when
 * known, the address of the device that caused the wakeup (a zeroed
 * address otherwise).
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
8930
/* Control channel registration data: dispatch table for mgmt commands
 * and the per-hdev initialization hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8937
/* Register the mgmt control channel with the HCI socket layer */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8942
/* Unregister the mgmt control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8947