1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 21
44
/* Opcodes a trusted (privileged) management socket may issue; this is
 * the list reported back by MGMT_OP_READ_COMMANDS for trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
131
/* Events a trusted management socket may receive; this is the event
 * list reported by MGMT_OP_READ_COMMANDS for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
176
/* Read-only subset of opcodes allowed on untrusted (unprivileged)
 * sockets; reported by MGMT_OP_READ_COMMANDS for such sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
189
/* Events delivered to untrusted sockets; reported by
 * MGMT_OP_READ_COMMANDS for such sockets.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
204
205 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
206
207 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
208 "\x00\x00\x00\x00\x00\x00\x00\x00"
209
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code. HCI status values beyond the end of this table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status() below.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
277
/* Translate an HCI status code into its MGMT counterpart. Status
 * codes beyond the conversion table map to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}
285
/* Send an index-related event on the control channel with the given
 * socket-flag filter; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
292
/* Send an event on the control channel, limited to sockets matching
 * @flag and excluding @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
299
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
306
/* Map a MGMT LE address type onto the core's LE address type: public
 * stays public, anything else is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
314
/* Fill a mgmt_rp_read_version reply with the interface version and
 * revision implemented by this kernel. @ver must point to a buffer at
 * least sizeof(struct mgmt_rp_read_version) bytes long.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
322
/* Handler for MGMT_OP_READ_VERSION: reply with the management
 * interface version/revision. @data and @data_len are unused since
 * the command carries no parameters.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
335
/* Handler for MGMT_OP_READ_COMMANDS: reply with the list of supported
 * opcodes and events. Untrusted sockets only see the read-only subset.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the opcode/event tables matching the socket's trust level. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		num_commands = ARRAY_SIZE(mgmt_commands);
		events = mgmt_events;
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		events = mgmt_untrusted_events;
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes first, then events, packed back to back. */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
387
/* Handler for MGMT_OP_READ_INDEX_LIST: report the ids of all
 * configured primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count so the reply can
	 * be sized before being filled in.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in ids. The extra filters below can only
	 * shrink the count relative to the first pass, never grow it.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from what was actually added. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
447
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the ids of all
 * primary controllers still awaiting configuration. Mirrors
 * read_index_list() but selects devices with HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for sizing. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in ids; extra filters only shrink count. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from what was actually added. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
507
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report every primary and
 * AMP controller along with its type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. Calling this switches the
 * socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for sizing. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries; extra filters only shrink count. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	/* Reply length uses the final count, not the first-pass one. */
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
581
is_configured(struct hci_dev * hdev)582 static bool is_configured(struct hci_dev *hdev)
583 {
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
585 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
586 return false;
587
588 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
589 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
590 !bacmp(&hdev->public_addr, BDADDR_ANY))
591 return false;
592
593 return true;
594 }
595
/* Return, as a little-endian option bitmask, the configuration steps
 * still outstanding for @hdev. The conditions mirror is_configured():
 * an empty mask means the controller is fully configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
611
/* Broadcast a New Configuration Options event carrying the currently
 * missing options, skipping @skip (the socket that triggered it).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
619
/* Complete @opcode successfully with the missing-options bitmask as
 * the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
627
/* Handler for MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus
 * which configuration options the controller supports and which are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
655
/* Build the bitmask of PHYs the controller supports, derived from its
 * BR/EDR LMP feature bits and LE feature bits. The nesting reflects
 * the feature hierarchy: EDR slot variants only exist if the matching
 * data rate is supported at all.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1-slot packets are mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory whenever LE is supported. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
707
/* Build the bitmask of PHYs currently selected. Note the asymmetry:
 * for BR 1M packets the pkt_type bits ENABLE the packet types, while
 * for EDR the 2DHx/3DHx bits in pkt_type DISABLE them - hence the
 * negated tests in the EDR branches below.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type mark packet types as
			 * "do not use", so a cleared bit means selected.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE defaults are tracked separately for TX and RX. */
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
770
get_configurable_phys(struct hci_dev * hdev)771 static u32 get_configurable_phys(struct hci_dev *hdev)
772 {
773 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
774 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
775 }
776
/* Build the bitmask of settings the controller can support, based on
 * its BR/EDR and LE capabilities, quirks and driver callbacks. This
 * is what MGMT_OP_READ_INFO reports as "supported settings".
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available, independent of radio capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs kernel support
			 * compiled in.
			 */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
828
/* Build the bitmask of settings currently in effect, derived from the
 * device's flag state. This is what MGMT_OP_READ_INFO reports as
 * "current settings" and what New Settings events carry.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
899
/* Look up a pending mgmt command for @hdev by opcode on the control
 * channel; returns NULL when none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
904
/* Like pending_find(), but additionally match on the command's
 * user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
911
mgmt_get_adv_discov_flags(struct hci_dev * hdev)912 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
913 {
914 struct mgmt_pending_cmd *cmd;
915
916 /* If there's a pending mgmt command the flags will not yet have
917 * their final values, so check for this first.
918 */
919 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
920 if (cmd) {
921 struct mgmt_mode *cp = cmd->param;
922 if (cp->val == 0x01)
923 return LE_AD_GENERAL;
924 else if (cp->val == 0x02)
925 return LE_AD_LIMITED;
926 } else {
927 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
928 return LE_AD_LIMITED;
929 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
930 return LE_AD_GENERAL;
931 }
932
933 return 0;
934 }
935
mgmt_get_connectable(struct hci_dev * hdev)936 bool mgmt_get_connectable(struct hci_dev *hdev)
937 {
938 struct mgmt_pending_cmd *cmd;
939
940 /* If there's a pending mgmt command the flag will not yet have
941 * it's final value, so check for this first.
942 */
943 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
944 if (cmd) {
945 struct mgmt_mode *cp = cmd->param;
946
947 return cp->val;
948 }
949
950 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
951 }
952
/* Delayed work: once the service cache period expires, push the
 * (possibly batched) EIR and class-of-device updates down to the
 * controller in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set; clears it atomically
	 * so concurrent expiry runs do nothing.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Queue the commands outside the device lock. */
	hci_req_run(&req, NULL);
}
973
/* Delayed work: the resolvable private address lifetime has elapsed.
 * Mark the RPA expired and, if advertising, restart it so a fresh RPA
 * gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless we are currently advertising. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
998
/* One-time switch of @hdev into mgmt-controlled mode; idempotent via
 * the HCI_MGMT flag test-and-set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1014
/* Handler for MGMT_OP_READ_INFO: report address, version,
 * manufacturer, supported/current settings, device class and names
 * for one controller. The snapshot is taken under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1044
/* Build the EIR payload used by the extended controller info reply and
 * event: class of device (when BR/EDR is enabled), appearance (when LE
 * is enabled) and both local names. Returns bytes written to @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1068
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with class of
 * device, appearance and names packed as a variable length EIR blob.
 * Calling this once also switches the socket over to the extended
 * info event and away from the legacy class/name events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1108
ext_info_changed(struct hci_dev * hdev,struct sock * skip)1109 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1110 {
1111 char buf[512];
1112 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1113 u16 eir_len;
1114
1115 memset(buf, 0, sizeof(buf));
1116
1117 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1118 ev->eir_len = cpu_to_le16(eir_len);
1119
1120 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1121 sizeof(*ev) + eir_len,
1122 HCI_MGMT_EXT_INFO_EVENTS, skip);
1123 }
1124
/* Complete @opcode on @sk with the controller's current settings. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 current_settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &current_settings,
				 sizeof(current_settings));
}
1132
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for
 * the delayed timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev))
		return;

	cancel_delayed_work(&hdev->power_off);
	queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1142
/* Notify mgmt sockets (except @sk) that advertising @instance was added. */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1151
/* Notify mgmt sockets (except @sk) that advertising @instance was removed. */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1161
cancel_adv_timeout(struct hci_dev * hdev)1162 static void cancel_adv_timeout(struct hci_dev *hdev)
1163 {
1164 if (hdev->adv_instance_timeout) {
1165 hdev->adv_instance_timeout = 0;
1166 cancel_delayed_work(&hdev->adv_instance_expire);
1167 }
1168 }
1169
/* Quiesce the controller in preparation for powering it off: disable
 * scanning, advertising and discovery, and abort every connection.
 * Returns the result of hci_req_run() (-ENODATA means there was
 * nothing to do).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (0x00 == all) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1203
/* MGMT_OP_SET_POWERED handler: power the controller on or off. The
 * actual work happens asynchronously via power_on/power_off work items;
 * the pending command is completed from there.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1258
new_settings(struct hci_dev * hdev,struct sock * skip)1259 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1260 {
1261 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1262
1263 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1264 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1265 }
1266
/* Public wrapper: broadcast New Settings to all interested sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1271
/* Context passed to mgmt_pending_foreach() callbacks that complete a
 * batch of pending commands with the same outcome.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (held), or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1277
/* mgmt_pending_foreach() callback: complete a pending command with the
 * current settings and remember the first socket seen so the caller can
 * skip it when broadcasting New Settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference is released by the caller via sock_put() */
		sock_hold(match->sk);
	}

	/* _free, not _remove: the list entry was already unlinked above */
	mgmt_pending_free(cmd);
}
1293
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1294 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1295 {
1296 u8 *status = data;
1297
1298 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1299 mgmt_pending_remove(cmd);
1300 }
1301
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1302 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1303 {
1304 if (cmd->cmd_complete) {
1305 u8 *status = data;
1306
1307 cmd->cmd_complete(cmd, *status);
1308 mgmt_pending_remove(cmd);
1309
1310 return;
1311 }
1312
1313 cmd_status_rsp(cmd, data);
1314 }
1315
/* Default cmd_complete handler: echo the full original command
 * parameters back in the Command Complete event.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1321
/* cmd_complete handler for address based commands: only the leading
 * mgmt_addr_info of the original parameters is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1327
mgmt_bredr_support(struct hci_dev * hdev)1328 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1329 {
1330 if (!lmp_bredr_capable(hdev))
1331 return MGMT_STATUS_NOT_SUPPORTED;
1332 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1333 return MGMT_STATUS_REJECTED;
1334 else
1335 return MGMT_STATUS_SUCCESS;
1336 }
1337
mgmt_le_support(struct hci_dev * hdev)1338 static u8 mgmt_le_support(struct hci_dev *hdev)
1339 {
1340 if (!lmp_le_capable(hdev))
1341 return MGMT_STATUS_NOT_SUPPORTED;
1342 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1343 return MGMT_STATUS_REJECTED;
1344 else
1345 return MGMT_STATUS_SUCCESS;
1346 }
1347
/* Called when the HCI part of a Set Discoverable operation finishes:
 * complete the pending mgmt command and, on success, arm the
 * discoverable timeout and broadcast New Settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back limited-discoverable set in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout that turns discoverable mode back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1382
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable (requires a timeout).
 * When HCI work is needed it is deferred to discoverable_update work
 * and completed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one transport to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable mode presupposes connectable mode */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Refuse while advertising is temporarily suspended */
	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only flip the flag, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	/* Actual HCI commands are issued from the work item */
	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1512
/* Called when the HCI part of a Set Connectable operation finishes:
 * complete the pending mgmt command and broadcast New Settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1540
/* Flag-only path of Set Connectable (used when the device is powered
 * off): update flags, respond with the settings and, if anything
 * changed, refresh scan state and broadcast New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable implies dropping discoverable too */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1569
/* MGMT_OP_SET_CONNECTABLE handler: flip the connectable setting. When
 * powered, HCI work is deferred to the connectable_update work item and
 * completed in mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flags can change, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Must not race an in-flight discoverable/connectable change */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	/* Actual HCI commands are issued from the work item */
	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1626
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, no HCI
 * commands needed; broadcast New Settings when the flag changed.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1669
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication.
 * When powered, sends HCI Write Auth Enable; the pending command is
 * completed from the HCI event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR to be supported and enabled */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip the flag, defer HCI work to power on */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1738
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. When powered,
 * sends HCI Write SSP Mode; the pending command is completed from the
 * HCI event handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR plus controller SSP support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag changes only; disabling SSP also disables HS */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode, if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1819
/* MGMT_OP_SET_HS handler: toggle High Speed (AMP) support. This is a
 * host-side flag only; no HCI commands are needed.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* HS support is a kernel compile-time option */
	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race a pending SSP change, which may also affect HS */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1880
/* HCI request completion for Set LE: answer all pending SET_LE
 * commands, broadcast New Settings and, when LE ended up enabled,
 * refresh the default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1927
/* MGMT_OP_SET_LE handler: toggle LE support on a dual-mode controller.
 * When HCI traffic is required, sends Write LE Host Supported and
 * completes the pending command in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* No HCI traffic needed: powered off, or host LE support
	 * already matches the requested value.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also turns off LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Turning LE off: stop advertising first */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2034
2035 /* This is a helper function to test for pending mgmt commands that can
2036 * cause CoD or EIR HCI commands. We can only allow one such pending
2037 * mgmt command at a time since otherwise we cannot easily track what
2038 * the current values are, will be, and based on that calculate if a new
2039 * HCI command needs to be sent and if yes with what value.
2040 */
pending_eir_or_class(struct hci_dev * hdev)2041 static bool pending_eir_or_class(struct hci_dev *hdev)
2042 {
2043 struct mgmt_pending_cmd *cmd;
2044
2045 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2046 switch (cmd->opcode) {
2047 case MGMT_OP_ADD_UUID:
2048 case MGMT_OP_REMOVE_UUID:
2049 case MGMT_OP_SET_DEV_CLASS:
2050 case MGMT_OP_SET_POWERED:
2051 return true;
2052 }
2053 }
2054
2055 return false;
2056 }
2057
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; 16/32-bit UUIDs occupy the last four bytes on top of
 * this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2062
get_uuid_size(const u8 * uuid)2063 static u8 get_uuid_size(const u8 *uuid)
2064 {
2065 u32 val;
2066
2067 if (memcmp(uuid, bluetooth_base_uuid, 12))
2068 return 128;
2069
2070 val = get_unaligned_le32(&uuid[12]);
2071 if (val > 0xffff)
2072 return 32;
2073
2074 return 16;
2075 }
2076
/* Shared completion for commands that modify the class of device:
 * complete the pending @mgmt_op with the (possibly updated) class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* The reply payload is always the 3-byte class of device */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2095
/* HCI request completion for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2102
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh the
 * class of device and EIR data on the controller. When that requires
 * no HCI traffic the command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-modifying command at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: nothing to send, so complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2160
enable_service_cache(struct hci_dev * hdev)2161 static bool enable_service_cache(struct hci_dev *hdev)
2162 {
2163 if (!hdev_is_powered(hdev))
2164 return false;
2165
2166 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2167 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2168 CACHE_TIMEOUT);
2169 return true;
2170 }
2171
2172 return false;
2173 }
2174
/* HCI request completion callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2181
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero wildcard UUID is given) and push updated Class of
 * Device and EIR data to the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was (re)armed, the controller update
		 * is deferred until the cache timeout, so reply now.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	/* Remove every stored entry matching the given UUID */
	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued; reply now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2260
/* HCI request completion callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2267
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class.
 * Only valid for BR/EDR capable controllers. When powered, the new
 * class (and possibly EIR data) is pushed to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and three high bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they are written to the
	 * controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the work item to finish,
		 * presumably because the work itself takes hdev->lock —
		 * keeping it held here could deadlock (TODO confirm).
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued; reply now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2338
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the entire stored link key
 * list with the keys supplied by userspace and update the
 * HCI_KEEP_DEBUG_KEYS setting. Blocked and debug keys are skipped.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all keys before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only if the setting actually changed */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2428
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address, skipping
 * the socket that triggered the unpair (it gets a command response).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2440
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored pairing data for a
 * device (link key for BR/EDR, SMP material for LE) and optionally
 * terminate an existing connection to it. When a disconnection is
 * initiated the command completes from the disconnect path via the
 * pending entry added below.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* Failure here means there was no stored link key, i.e.
		 * the device was never paired.
		 */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2568
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address. The command completes asynchronously from the
 * disconnect complete path via the pending entry added below.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections are not established yet/anymore */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2634
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link is reported as BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
2653
/* MGMT_OP_GET_CONNECTIONS handler: reply with the list of addresses of
 * connections that mgmt considers connected. SCO/eSCO links are
 * excluded since they ride on top of an ACL link that is reported
 * already; the reply length is recalculated after filtering.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply buffer. This may
	 * over-allocate by any SCO/eSCO links filtered out below, which is
	 * harmless since the final length is computed from the fill count.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. Filter SCO/eSCO links before
	 * copying rather than writing the slot and then discarding it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2707
/* Queue an HCI PIN Code Negative Reply for the given address and track
 * it with a pending mgmt command; the command completes from the HCI
 * command status/complete path.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2728
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user supplied PIN code to
 * the controller for an ongoing BR/EDR pairing. If high security was
 * requested the PIN must be a full 16 bytes, otherwise the pairing is
 * rejected with a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject the pairing with a
	 * negative reply and tell the caller the parameters were invalid.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2790
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings. Completes synchronously.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;
	u8 io_cap = cp->io_capability;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = io_cap;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2813
find_pairing(struct hci_conn * conn)2814 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2815 {
2816 struct hci_dev *hdev = conn->hdev;
2817 struct mgmt_pending_cmd *cmd;
2818
2819 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2820 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2821 continue;
2822
2823 if (cmd->user_data != conn)
2824 continue;
2825
2826 return cmd;
2827 }
2828
2829 return NULL;
2830 }
2831
/* Complete a Pair Device command with the given status and tear down
 * this command's association with the connection: the pairing callbacks
 * are cleared and the references taken in pair_device() are released.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the hold taken by hci_connect_acl()/hci_connect_le_scan() */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
2860
/* Called from SMP when pairing has finished; completes any pending Pair
 * Device command for this connection with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2872
/* BR/EDR connection callback (connect/security/disconnect) used to
 * complete a pending Pair Device command with the HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2888
/* LE connection callback for a pending Pair Device command. Only
 * failures are reported here; for LE a successful connection alone does
 * not mean the pairing finished (success comes via mgmt_smp_complete()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2907
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates a BR/EDR ACL or LE connection as needed, installs pairing
 * callbacks on it and tracks the operation with a pending command that
 * is completed from those callbacks (or immediately if the link is
 * already connected and secure).
 *
 * Fix: hci_conn_params_add() allocates and may return NULL; the
 * original dereferenced its result unconditionally.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -ENOMEM;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means someone else is already using
	 * this connection's callbacks (e.g. another pairing).
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3038
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancel an ongoing Pair Device
 * command, remove any key material created so far and terminate the
 * link if it was created solely for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): cmd->cmd_complete here is pairing_complete(),
	 * which drops the hci_conn references held for the command, yet
	 * conn is dereferenced again below — confirm another reference
	 * (e.g. the connection hash under hdev->lock) keeps it alive.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3095
/* Shared handler for the user pairing response commands (PIN code
 * negative reply, user confirm reply/neg reply, passkey reply/neg
 * reply). For LE the response is routed to SMP and completes
 * synchronously; for BR/EDR the corresponding HCI command (hci_op) is
 * sent and the mgmt command completes from the HCI event path.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3166
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3178
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request. The payload is fixed size, hence the explicit length check.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3194
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3206
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by the
 * user for an ongoing pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3218
/* Handle the User Passkey Negative Reply management command.
 *
 * Rejects the passkey request; negative replies carry no passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3230
/* Expire the current advertising instance if it carries any of the
 * given flags (e.g. MGMT_ADV_FLAG_LOCAL_NAME, MGMT_ADV_FLAG_APPEARANCE)
 * and schedule the next instance in its place, so that advertising data
 * derived from a just-changed property is refreshed.
 *
 * NOTE(review): the callers in this file (set_name_complete,
 * set_appearance) invoke this under hci_dev_lock — confirm the lock is
 * held for any new call sites.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Advance to the instance after the current one; nothing further
	 * to do if there is none.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3259
/* Completion handler for the HCI request issued by set_local_name().
 *
 * Reports the result of the pending MGMT_OP_SET_LOCAL_NAME command back
 * to its originating socket and, on success, expires any advertising
 * instance that carries the local name so the data on air is refreshed.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the command already completed or was cancelled */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		/* Echo the accepted name back in the command complete */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3291
/* Handle the Set Local Name management command.
 *
 * Stores the new complete and short names. When the controller is
 * powered, an HCI request is queued to update the controller-side name,
 * EIR and (if advertising) scan response data; the command then
 * completes asynchronously via set_name_complete(). When powered off,
 * only the cached names are updated and the command completes
 * immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just cache the name, complete the command
		 * and notify listeners; no HCI traffic is possible.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3361
/* Handle the Set Appearance management command.
 *
 * Updates the LE appearance value. When the value actually changes and
 * LE advertising is active, the current advertising instance carrying
 * the appearance is expired so the data is refreshed, and extended info
 * listeners are notified. Requires an LE-capable controller.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance = le16_to_cpu(cp->appearance);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (new_appearance != hdev->appearance) {
		hdev->appearance = new_appearance;

		/* Force a refresh of advertising data that includes the
		 * appearance, if it is currently on air.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3395
/* Handle the Get PHY Configuration management command.
 *
 * Returns the supported, selected and configurable PHY bitmasks for
 * the controller in little-endian wire format.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	/* Snapshot the three PHY masks atomically w.r.t. hdev state */
	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3416
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3417 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3418 {
3419 struct mgmt_ev_phy_configuration_changed ev;
3420
3421 memset(&ev, 0, sizeof(ev));
3422
3423 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3424
3425 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3426 sizeof(ev), skip);
3427 }
3428
/* Completion handler for the HCI LE Set Default PHY request issued by
 * set_phy_configuration().
 *
 * Resolves the pending MGMT_OP_SET_PHY_CONFIGURATION command and, on
 * success, broadcasts the PHY Configuration Changed event to all other
 * sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the command already completed or was cancelled */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone except the requester */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3459
/* Handle the Set PHY Configuration management command.
 *
 * Validates the requested PHY selection against the supported and
 * configurable masks, translates the BR/EDR part into an ACL packet
 * type mask applied directly to hdev->pkt_type, and — if the LE part
 * changed — queues an HCI LE Set Default PHY request which completes
 * asynchronously via set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* PHYs that cannot be configured must all remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: complete immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto the ACL packet type mask.
	 * Note that EDR bits are inverted: setting HCI_2DHx/HCI_3DHx
	 * in pkt_type *disallows* the corresponding packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * send; notify (if needed) and complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" per direction */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3614
/* Handle the Set Blocked Keys management command.
 *
 * Replaces the device's blocked-key list with the one supplied by
 * userspace. The payload is validated for a sane key count and an
 * exact length before the existing list is cleared.
 *
 * Returns the result of mgmt_cmd_complete()/mgmt_cmd_status(); the
 * management status is MGMT_STATUS_NO_RESOURCES if an allocation
 * failed part-way (already-added entries are kept in that case).
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Iterate using the CPU-endian key_count; keys->key_count is
	 * still little-endian and would be wrong on big-endian hosts.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
3663
/* Handle the Set Wideband Speech management command.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag. The setting can only
 * be changed while the controller is powered off; when powered, a
 * request that would change the current value is rejected. Requires
 * the HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is a valid parameter */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the value cannot be flipped */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3719
/* Handle the Read Controller Capabilities management command.
 *
 * Builds an EIR-style capability list: security flags, maximum
 * encryption key sizes and (when available) the LE TX power range,
 * probing the controller's supported-commands bitmask for optional
 * entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes is enough for the header plus all entries appended
	 * below (flags, two key-size entries, tx power range).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3786
/* UUIDs identifying experimental features. They are stored in
 * little-endian byte order, i.e. reversed relative to the string form
 * given in the comment above each one.
 */

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3806
/* Handle the Read Experimental Features Information management command.
 *
 * Works both with a controller index (per-hdev features) and without
 * one (hdev == NULL, global features such as the debug feature). Each
 * feature is reported as a 16-byte UUID plus a 32-bit flags word; each
 * entry is 20 bytes on the wire.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is global and only reported on the
	 * non-controller index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* Simultaneous central/peripheral requires the quirk plus
		 * the relevant supported-LE-states bits.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0) = enabled, BIT(1) = changing it alters the
		 * supported settings (see exp_ll_privacy_feature_changed).
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3865
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature. BIT(0) reflects the enabled state and BIT(1)
 * signals that the supported settings changed as a consequence.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3880
3881 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the global debug
 * feature (no controller index). BIT(0) reflects the new state.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3894 #endif
3895
/* Helper for building entries of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
3901
3902 /* The zero key uuid is special. Multiple exp features are set through it. */
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Writing the all-zero UUID disables every experimental feature that is
 * currently enabled: the global debug feature (non-controller index)
 * and, for a powered-off controller, LL privacy. The data payload
 * beyond the UUID is ignored here.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		/* Only notify if the debug state actually flipped */
		if (changed)
			exp_debug_feature_changed(false, sk);
	}
#endif

	/* LL privacy can only be toggled while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_ll_privacy_feature_changed(false, hdev, sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
3937
3938 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for the experimental debug feature.
 *
 * Only valid on the non-controller index with a single boolean octet
 * as parameter. Toggles the global bt_dbg state and notifies other
 * sockets if the value actually changed.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to everyone except the requester */
	if (changed)
		exp_debug_feature_changed(val, sk);

	return err;
}
3983 #endif
3984
/* Set-handler for the experimental LL privacy (RPA resolution) feature.
 *
 * Only valid on a controller index, while the controller is powered
 * off, with a single boolean octet as parameter. Toggles the
 * HCI_ENABLE_LL_PRIVACY flag and notifies other sockets if the value
 * actually changed.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Enabling LL privacy drops the advertising setting */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to everyone except the requester */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4049
/* Dispatch table mapping an experimental feature UUID to its
 * set-handler; consumed by set_exp_feature().
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4064
/* Handle the Set Experimental Feature management command.
 *
 * Dispatches on the feature UUID via the exp_features[] table, which
 * is terminated by a NULL entry. Unknown UUIDs are rejected with
 * MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feat;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feat = exp_features; feat->uuid; feat++) {
		if (!memcmp(cp->uuid, feat->uuid, 16))
			return feat->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4082
/* Bitmask with all HCI_CONN_FLAG_* bits below HCI_CONN_FLAG_MAX set */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4084
/* Handle the Get Device Flags management command.
 *
 * Looks up the per-device flags for a BR/EDR address in the accept
 * list, or for an LE address in the connection parameters list, and
 * returns the supported and current flag bitmasks.
 *
 * Completes with MGMT_STATUS_INVALID_PARAMS when the device is not
 * known; the reply structure is zeroed in that case.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* No trailing "\n": the debug macro already appends a newline */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
4134
/* Emit a Device Flags Changed event for the given address, skipping
 * the socket that made the change.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	/* Convert the flag masks to wire (little-endian) format */
	ev.addr.type = bdaddr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4148
/* Handle the Set Device Flags management command.
 *
 * Validates the requested flags against the supported mask, stores
 * them on the matching accept-list entry (BR/EDR) or connection
 * parameters (LE), and broadcasts a Device Flags Changed event on
 * success.
 *
 * Completes with MGMT_STATUS_INVALID_PARAMS for unsupported flags or
 * an unknown device.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already CPU-endian here; converting it again
	 * would corrupt the value on big-endian hosts.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type,
		   current_flags);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

done:
	hci_dev_unlock(hdev);

	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4206
/* Emit an Advertisement Monitor Added event for the given handle,
 * skipping the socket that registered the monitor.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4216
/* Emit an Advertisement Monitor Removed event for the given handle.
 *
 * If a Remove Advertisement Monitor command targeting a specific
 * handle is pending, its originating socket is skipped so the
 * requester does not receive an echo of its own removal.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

		/* A zero handle means "remove all"; only skip for a
		 * targeted removal.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4236
/* Handle the Read Advertisement Monitor Features management command.
 *
 * Reports the supported/enabled monitor feature masks, the handle and
 * pattern limits, and the list of currently registered monitor
 * handles.
 *
 * NOTE(review): handles[] holds HCI_MAX_ADV_MONITOR_NUM_HANDLES
 * entries; this assumes the registration path never lets the idr grow
 * beyond that limit — confirm.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles under the lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4285
/* Finish a pending Add Adv Patterns Monitor (RSSI) command once the
 * controller has answered.  On success the monitor is accounted for,
 * marked registered and background scanning is re-evaluated.
 *
 * Returns 0 when no matching command was pending, otherwise the result
 * of sending the command-complete response.
 */
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	/* Zero-initialize: the debug print after "done" reads
	 * rp.monitor_handle even when no pending command (and thus no
	 * monitor) is found, which previously read uninitialized stack
	 * memory.
	 */
	struct mgmt_rp_add_adv_patterns_monitor rp = { .monitor_handle = 0 };
	struct mgmt_pending_cmd *cmd;
	struct adv_monitor *monitor;
	int err = 0;

	hci_dev_lock(hdev);

	/* Either the RSSI or the plain variant may be pending */
	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
	if (!cmd) {
		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
		if (!cmd)
			goto done;
	}

	monitor = cmd->user_data;
	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_background_scan(hdev);
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
		   rp.monitor_handle, status);

	return err;
}
4324
/* Common tail of the Add Adv Patterns Monitor handlers.
 *
 * Takes ownership of @m: on every error path (including a non-zero
 * @status handed in by the caller) the monitor is freed via
 * hci_free_adv_monitor() before an error status is returned.  On
 * success the reply is either deferred to
 * mgmt_add_adv_patterns_monitor_complete() (controller round-trip
 * needed) or sent immediately.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc error) - just clean up */
	if (status)
		goto unlock;

	/* Only one monitor/LE state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the controller must confirm the add; the
	 * command then completes asynchronously.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map kernel errno to mgmt status codes */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Monitor registered without a controller round-trip -
		 * reply right away.
		 */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4388
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)4389 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4390 struct mgmt_adv_rssi_thresholds *rssi)
4391 {
4392 if (rssi) {
4393 m->rssi.low_threshold = rssi->low_threshold;
4394 m->rssi.low_threshold_timeout =
4395 __le16_to_cpu(rssi->low_threshold_timeout);
4396 m->rssi.high_threshold = rssi->high_threshold;
4397 m->rssi.high_threshold_timeout =
4398 __le16_to_cpu(rssi->high_threshold_timeout);
4399 m->rssi.sampling_period = rssi->sampling_period;
4400 } else {
4401 /* Default values. These numbers are the least constricting
4402 * parameters for MSFT API to work, so it behaves as if there
4403 * are no rssi parameter to consider. May need to be changed
4404 * if other API are to be supported.
4405 */
4406 m->rssi.low_threshold = -127;
4407 m->rssi.low_threshold_timeout = 60;
4408 m->rssi.high_threshold = -127;
4409 m->rssi.high_threshold_timeout = 0;
4410 m->rssi.sampling_period = 0;
4411 }
4412 }
4413
/* Validate and copy @pattern_count patterns into @m->patterns.
 * On failure, patterns already linked remain on the list and are
 * presumably released together with the monitor by the caller —
 * confirm against hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* The pattern must fit entirely within the AD payload */
		if (src->offset >= HCI_MAX_AD_LENGTH ||
		    src->length > HCI_MAX_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
4444
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the request size,
 * allocate a monitor with default RSSI thresholds plus the supplied
 * patterns, then hand everything to __add_adv_patterns_monitor().
 *
 * Note: __add_adv_patterns_monitor() takes ownership of @m (NULL is
 * fine) and frees it on failure, so every path funnels through "done".
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one byte beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Trailing data must be exactly pattern_count patterns */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* This op carries no thresholds - install permissive defaults */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
4481
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like
 * add_adv_patterns_monitor() but with caller-supplied RSSI thresholds.
 *
 * Note: __add_adv_patterns_monitor() takes ownership of @m (NULL is
 * fine) and frees it on failure, so every path funnels through "done".
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one byte beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Trailing data must be exactly pattern_count patterns */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* Thresholds come straight from the request */
	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
4518
/* Finish a pending Remove Adv Monitor command once the controller has
 * answered.  On success, background scanning is re-evaluated.
 *
 * Returns 0 when no matching command was pending, otherwise the result
 * of sending the command-complete response.
 */
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	/* Zero-initialize: the debug print after "done" reads
	 * rp.monitor_handle even when no command is pending, which
	 * previously read uninitialized stack memory.
	 */
	struct mgmt_rp_remove_adv_monitor rp = { .monitor_handle = 0 };
	struct mgmt_cp_remove_adv_monitor *cp;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (!cmd)
		goto done;

	/* Echo back the handle the caller asked to remove (0 == all) */
	cp = cmd->param;
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_background_scan(hdev);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
		   rp.monitor_handle, status);

	return err;
}
4549
/* Handle MGMT_OP_REMOVE_ADV_MONITOR.  A monitor handle of 0 removes
 * all registered monitors, otherwise only the given one.  When the
 * removal requires a controller round-trip the reply is deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor/LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no monitor with that handle is registered */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4614
/* HCI request callback for MGMT_OP_READ_LOCAL_OOB_DATA.  Translates the
 * controller's OOB reply - legacy or extended form, selected by
 * @opcode - into the mgmt response.  Bails out silently when the
 * pending command has already gone away.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	/* Controller error or missing reply buffer */
	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply carries no P-256 values - trim them off */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4673
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local
 * OOB data; the reply is delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 opcode;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Use the extended variant when BR/EDR Secure Connections is on */
	opcode = bredr_sc_enabled(hdev) ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
					  HCI_OP_READ_LOCAL_OOB_DATA;

	hci_req_init(&req, hdev);
	hci_req_add(&req, opcode, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4724
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA.  Two request sizes are accepted:
 * the legacy form carrying only P-192 hash/randomizer, and the extended
 * form carrying both P-192 and P-256 values.  All-zero key material is
 * treated as "not provided".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy (P-192 only) form is BR/EDR only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4832
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data
 * for one address, or all of it when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only BR/EDR addresses are accepted here */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* BDADDR_ANY clears every stored entry */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	} else {
		err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
						 cp->addr.type);
		status = err < 0 ? MGMT_STATUS_INVALID_PARAMS :
				   MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4869
/* Complete whichever start-discovery flavour is pending and wake the
 * suspend path if it was waiting for discovery to unpause.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 discovery_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Any of the three start-discovery flavours may be pending */
	for (i = 0; i < ARRAY_SIZE(discovery_ops) && !cmd; i++)
		cmd = pending_find(discovery_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4899
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)4900 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4901 uint8_t *mgmt_status)
4902 {
4903 switch (type) {
4904 case DISCOV_TYPE_LE:
4905 *mgmt_status = mgmt_le_support(hdev);
4906 if (*mgmt_status)
4907 return false;
4908 break;
4909 case DISCOV_TYPE_INTERLEAVED:
4910 *mgmt_status = mgmt_le_support(hdev);
4911 if (*mgmt_status)
4912 return false;
4913 fallthrough;
4914 case DISCOV_TYPE_BREDR:
4915 *mgmt_status = mgmt_bredr_support(hdev);
4916 if (*mgmt_status)
4917 return false;
4918 break;
4919 default:
4920 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4921 return false;
4922 }
4923
4924 return true;
4925 }
4926
/* Common handler for the Start Discovery command family (@op selects
 * the flavour).  Validates device and discovery state, records the
 * requested parameters, and queues the discovery work item; completion
 * is reported asynchronously via mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4994
/* MGMT_OP_START_DISCOVERY handler: unlimited discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5001
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5009
/* Completion helper for Start Service Discovery: echoes only the first
 * byte of the stored request back to the issuer.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
5016
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery but with
 * RSSI and UUID result filtering.  The UUID list length is validated
 * against the declared count before it is duplicated; the copy is owned
 * by hdev->discovery and released by hci_discovery_filter_clear().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that cannot overflow the u16 expected_len */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Discovery cannot be started while it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request must carry exactly uuid_count 128-bit UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
5125
/* Complete a pending Stop Discovery command and wake the suspend path
 * if it was waiting for discovery to pause.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);
	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
5148
/* Handle MGMT_OP_STOP_DISCOVERY: request termination of the currently
 * running discovery; completion is reported asynchronously via
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &cp->type,
					sizeof(cp->type));
		goto unlock;
	}

	/* The type must match the discovery that is actually running */
	if (hdev->discovery.type != cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->type, sizeof(cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5190
/* Handle MGMT_OP_CONFIRM_NAME: tell the inquiry cache whether the name
 * of a discovered device is already known (drop it from the resolve
 * list) or still needed (queue it for name resolution).
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	if (cp->name_known) {
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	} else {
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5232
/* Handle MGMT_OP_BLOCK_DEVICE: put an address on the reject list and
 * notify other mgmt sockets via MGMT_EV_DEVICE_BLOCKED.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		/* Event goes out only when the list was actually changed */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5268
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove an address from the reject list
 * and notify other mgmt sockets via MGMT_EV_DEVICE_UNBLOCKED.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* Address was not on the list */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5304
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record and refresh
 * the EIR data so the change becomes visible to remote devices.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only source values up to 0x0002 are accepted */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Regenerate EIR so it carries the new Device ID record */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5339
/* Request-completion callback used when re-enabling advertising
 * instances; it only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}
5345
/* Completion callback for the Set Advertising HCI request.  Syncs the
 * HCI_ADVERTISING flag with the controller's actual state, answers all
 * pending MGMT_OP_SET_ADVERTISING commands, wakes suspend/resume
 * waiters, and — if advertising was just disabled — restores any
 * previously configured multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the real LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance selected: fall back to the first one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5419
/* MGMT_OP_SET_ADVERTISING handler.  cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable.  Either toggles the mgmt flags directly
 * (when no HCI traffic is needed) or queues an HCI request completed by
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused during system suspend; reject changes
	 * until it is unpaused.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually flipped. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight Set Advertising / Set LE commands. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5538
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address.  Only allowed while the controller is powered off; BDADDR_ANY
 * disables the static address again.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the identity address while powered would confuse
	 * existing connections, so reject it.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5582
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval/window and restart background scanning so the new values
 * take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* Range checks: 0x0004-0x4000 for both interval and window. */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5639
/* Completion callback for the Set Fast Connectable HCI request.
 * Translates the HCI status into a mgmt response for the pending
 * command and updates the HCI_FAST_CONNECTABLE flag on success.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed). */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* cmd->param holds the original mgmt_mode request. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5673
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2.  When powered, the change is
 * carried out via an HCI request completed by
 * fast_connectable_complete(); otherwise only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op if the requested state is already active. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: just flip the flag; HCI programming happens on
	 * power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5738
/* Completion callback for the Set BR/EDR HCI request.  On failure the
 * speculatively-set HCI_BREDR_ENABLED flag is rolled back (set_bredr()
 * flips it before running the request).
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5770
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller.  Disabling while powered is rejected; re-enabling is
 * rejected when a static address or Secure Connections is in use (see
 * the comment below for the rationale).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; turning BR/EDR on/off only makes sense
	 * relative to an LE-enabled configuration.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already active: just acknowledge. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * set_bredr_complete() rolls it back on failure.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5882
/* Completion callback for the Write Secure Connections Support HCI
 * command.  On success, syncs HCI_SC_ENABLED/HCI_SC_ONLY with the value
 * stored in the pending command (0x00 = off, 0x01 = SC, 0x02 = SC only).
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	/* cp->val was validated by set_secure_conn() before queuing. */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5927
/* MGMT_OP_SET_SECURE_CONN handler.  cp->val: 0x00 = off, 0x01 = Secure
 * Connections, 0x02 = Secure Connections Only.  When the controller
 * does not need programming (powered off, no BR/EDR SC support, or
 * BR/EDR disabled) the flags are toggled directly; otherwise the change
 * is made via HCI and finished by sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC requires either controller support or an LE-enabled host. */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR-enabled, SC-capable controllers SSP must be on first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed: update flags and reply directly. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just acknowledge. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* NOTE(review): this goto is redundant — control falls
		 * through to the label anyway — but harmless.
		 */
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6015
/* MGMT_OP_SET_DEBUG_KEYS handler.  cp->val: 0x00 = discard debug keys,
 * 0x01 = keep debug keys, 0x02 = keep and also actively use SSP debug
 * mode.  Toggles HCI_KEEP_DEBUG_KEYS/HCI_USE_DEBUG_KEYS and, when the
 * "use" state changed on a powered SSP controller, programs the
 * controller's SSP debug mode accordingly.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* 0x02 additionally enables active use of the debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only touch the controller when the use-state actually changed. */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6062
/* MGMT_OP_SET_PRIVACY handler.  cp->privacy: 0x00 = off, 0x01 = privacy
 * on, 0x02 = limited privacy.  Stores the local IRK and marks the RPA
 * as expired so a fresh one is generated.  Only allowed while powered
 * off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA on next use. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6119
irk_is_valid(struct mgmt_irk_info * irk)6120 static bool irk_is_valid(struct mgmt_irk_info *irk)
6121 {
6122 switch (irk->addr.type) {
6123 case BDADDR_LE_PUBLIC:
6124 return true;
6125
6126 case BDADDR_LE_RANDOM:
6127 /* Two most significant bits shall be set */
6128 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6129 return false;
6130 return true;
6131 }
6132
6133 return false;
6134 }
6135
/* MGMT_OP_LOAD_IRKS handler: replace the whole IRK store with the list
 * supplied by user space.  Validates the count against the payload
 * length and each entry's address before touching any state, so the
 * operation is all-or-nothing up to the per-key blocked-list skips.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps expected_len below U16_MAX. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload exactly. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before modifying the key store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Administratively blocked keys are skipped, not failed. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it can resolve RPAs. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6211
ltk_is_valid(struct mgmt_ltk_info * key)6212 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6213 {
6214 if (key->initiator != 0x00 && key->initiator != 0x01)
6215 return false;
6216
6217 switch (key->addr.type) {
6218 case BDADDR_LE_PUBLIC:
6219 return true;
6220
6221 case BDADDR_LE_RANDOM:
6222 /* Two most significant bits shall be set */
6223 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6224 return false;
6225 return true;
6226 }
6227
6228 return false;
6229 }
6230
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the whole LTK store with
 * the list supplied by user space.  Length and per-entry validation
 * happen before any state is cleared; blocked keys, unknown key types
 * and P256 debug keys are silently skipped during the load.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps expected_len below U16_MAX. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before modifying the key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Administratively blocked keys are skipped, not failed. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not loaded: fall
			 * through to the default and skip this entry.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6331
/* Finish a pending Get Connection Information command: build the reply
 * from the cached connection values (or invalid markers on failure),
 * send it, and release the connection references taken when the command
 * was queued.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original request address was saved in cmd->param. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Mark all values as unavailable on error. */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the usage and object references held for this command. */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6358
/* Completion callback for the Get Connection Information refresh
 * request (Read RSSI, optionally followed by Read Transmit Power
 * Level).  Resolves the connection from the last-sent command's handle
 * and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by connection (stored as user_data). */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6412
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection. Values cached in the hci_conn are returned
 * directly when recent enough; otherwise HCI Read RSSI / Read TX Power
 * commands are queued and the mgmt reply is deferred to the request
 * callback (conn_info_refresh_complete).
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address back in every reply, success or not */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Map the mgmt address type to the matching link type for lookup */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info request per connection at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;	/* current TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;	/* maximum TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until the deferred completion runs;
		 * conn_info_cmd_complete() releases these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6533
/* Completion helper for Get Clock Info: build the mgmt reply from the
 * cached device and (optional) connection clock values and release the
 * connection references taken when the request was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param holds the original request; its leading field is the
	 * address info to echo back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure reply with all clock values zeroed */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* Piconet clock is only available when a connection was supplied */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold/get references taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6569
/* HCI request callback for Get Clock Info: match the completed Read
 * Clock command back to its pending mgmt command and finish it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection was
	 * read; resolve the handle so the pending command keyed on that
	 * connection can be found.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* clock_info_cmd_complete() sends the reply and drops conn refs */
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6601
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local Bluetooth clock
 * and, when a peer address is given, also the piconet clock of that
 * connection. The reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection whose piconet
	 * clock should be read in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: which = 0x00 (local clock) via the memset */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the deferred completion;
		 * clock_info_cmd_complete() drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6677
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6678 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6679 {
6680 struct hci_conn *conn;
6681
6682 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6683 if (!conn)
6684 return false;
6685
6686 if (conn->dst_type != type)
6687 return false;
6688
6689 if (conn->state != BT_CONNECTED)
6690 return false;
6691
6692 return true;
6693 }
6694
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy on an LE conn_params entry (creating the
 * entry if needed) and re-file it on the matching pend_le_conns /
 * pend_le_reports action list. Returns 0 on success, -EIO if the entry
 * could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whatever action list it is on before re-filing */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect overrides report-only */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6739
/* Emit a Device Added mgmt event to all listeners except @sk. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6751
/* Handler for MGMT_OP_ADD_DEVICE: for BR/EDR addresses add the device
 * to the accept list (incoming connections); for LE addresses create or
 * update connection parameters with the auto-connect policy derived
 * from cp->action (0x00 report, 0x01 direct, 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined for this command */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly just created) entry so
		 * they can be reported in the Device Flags Changed event.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6849
/* Emit a Device Removed mgmt event to all listeners except @sk. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = { .addr.type = type };

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6860
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one device (accept-list
 * entry for BR/EDR, conn_params entry for LE) or, when the address is
 * BDADDR_ANY, wipe the whole accept list and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed with
		 * this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* The wildcard address must come with type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries were not added via Add Device */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect, but
			 * downgrade them so they are cleaned up afterwards.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6989
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Invalid individual
 * entries are skipped with an error log; the command itself only fails
 * on malformed overall length or missing LE support.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count so expected_len below cannot exceed
	 * the u16 message length.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of disabled entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the Core Spec limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7074
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the HCI_EXT_CONFIGURED
 * flag for controllers with the EXTERNAL_CONFIG quirk. May move the
 * controller between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed reflects whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the UNCONFIGURED flag,
	 * switch the controller to the other index list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Was unconfigured, now configured: bring it up so
			 * the normal index can be announced.
			 */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7130
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: record a public address to be
 * programmed via the driver's set_bdaddr hook. If this makes a
 * previously unconfigured controller configured, kick off power-on so
 * it can be announced under the normal index.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Pointless without a driver hook to actually program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Setting the address completed configuration: re-announce
		 * the controller under its configured index.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7182
/* Request callback for Read Local OOB (Extended) Data: translate the
 * controller's reply (legacy P-192 only, or P-192 + P-256 extended
 * form) into EIR-encoded OOB data, complete the pending mgmt command
 * and broadcast a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller error: reply with no OOB data at all */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field + two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-192 and P-256 pairs */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* Secure-Connections-only: omit P-192 */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* Nothing to encode on error paths; send the (empty) reply */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other OOB listeners, excluding the requesting socket */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7293
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7294 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7295 struct mgmt_cp_read_local_oob_ext_data *cp)
7296 {
7297 struct mgmt_pending_cmd *cmd;
7298 struct hci_request req;
7299 int err;
7300
7301 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7302 cp, sizeof(*cp));
7303 if (!cmd)
7304 return -ENOMEM;
7305
7306 hci_req_init(&req, hdev);
7307
7308 if (bredr_sc_enabled(hdev))
7309 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7310 else
7311 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7312
7313 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7314 if (err < 0) {
7315 mgmt_pending_remove(cmd);
7316 return err;
7317 }
7318
7319 return 0;
7320 }
7321
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: build EIR-encoded local
 * out-of-band pairing data for either the BR/EDR transport (deferring
 * to the controller via read_local_ssp_oob_req when SSP is enabled) or
 * the LE transport (address, role, SC confirm/random, flags).
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested transport and size the reply
	 * buffer for the worst case of that transport.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;	/* class-of-dev field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* bdaddr + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must come from the controller; reply is
			 * deferred to the request callback on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 peripheral preferred when advertising,
		 * otherwise 0x01 central preferred.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Requester is interested in further OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7477
get_supported_adv_flags(struct hci_dev * hdev)7478 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7479 {
7480 u32 flags = 0;
7481
7482 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7483 flags |= MGMT_ADV_FLAG_DISCOV;
7484 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7485 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7486 flags |= MGMT_ADV_FLAG_APPEARANCE;
7487 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7488 flags |= MGMT_ADV_PARAM_DURATION;
7489 flags |= MGMT_ADV_PARAM_TIMEOUT;
7490 flags |= MGMT_ADV_PARAM_INTERVALS;
7491 flags |= MGMT_ADV_PARAM_TX_POWER;
7492 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7493
7494 /* In extended adv TX_POWER returned from Set Adv Param
7495 * will be always valid.
7496 */
7497 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7498 ext_adv_capable(hdev))
7499 flags |= MGMT_ADV_FLAG_TX_POWER;
7500
7501 if (ext_adv_capable(hdev)) {
7502 flags |= MGMT_ADV_FLAG_SEC_1M;
7503 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7504 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7505
7506 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7507 flags |= MGMT_ADV_FLAG_SEC_2M;
7508
7509 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7510 flags |= MGMT_ADV_FLAG_SEC_CODED;
7511 }
7512
7513 return flags;
7514 }
7515
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits and the list of currently registered
 * advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privay support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Copy out the instance identifiers */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7571
/* Return the number of advertising-data bytes append_local_name() would
 * emit for this controller (AD header plus complete or shortened name).
 * The scratch buffer is sized for the worst case append_local_name()
 * writes - assumes short name (10 bytes) plus header; confirm against
 * append_local_name() if its output grows.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
}
7578
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7579 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7580 bool is_adv_data)
7581 {
7582 u8 max_len = HCI_MAX_AD_LENGTH;
7583
7584 if (is_adv_data) {
7585 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7586 MGMT_ADV_FLAG_LIMITED_DISCOV |
7587 MGMT_ADV_FLAG_MANAGED_FLAGS))
7588 max_len -= 3;
7589
7590 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7591 max_len -= 3;
7592 } else {
7593 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7594 max_len -= calculate_name_len(hdev);
7595
7596 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7597 max_len -= 4;
7598 }
7599
7600 return max_len;
7601 }
7602
flags_managed(u32 adv_flags)7603 static bool flags_managed(u32 adv_flags)
7604 {
7605 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7606 MGMT_ADV_FLAG_LIMITED_DISCOV |
7607 MGMT_ADV_FLAG_MANAGED_FLAGS);
7608 }
7609
tx_power_managed(u32 adv_flags)7610 static bool tx_power_managed(u32 adv_flags)
7611 {
7612 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7613 }
7614
name_managed(u32 adv_flags)7615 static bool name_managed(u32 adv_flags)
7616 {
7617 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7618 }
7619
appearance_managed(u32 adv_flags)7620 static bool appearance_managed(u32 adv_flags)
7621 {
7622 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7623 }
7624
/* Validate user-supplied advertising or scan-response TLV data: it must fit
 * within the space left after kernel-managed fields, be well-formed
 * length/type/value structures, and not contain any AD type the kernel
 * manages itself (per adv_flags).
 *
 * Returns true if the data may be accepted as-is.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this before looking at
		 * data[i + 1] also keeps the type-byte reads below within
		 * bounds for a truncated final field.
		 */
		if (i + cur_len >= len)
			return false;

		/* Reject AD types that the kernel manages itself */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7669
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)7670 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7671 {
7672 u32 supported_flags, phy_flags;
7673
7674 /* The current implementation only supports a subset of the specified
7675 * flags. Also need to check mutual exclusiveness of sec flags.
7676 */
7677 supported_flags = get_supported_adv_flags(hdev);
7678 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7679 if (adv_flags & ~supported_flags ||
7680 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7681 return false;
7682
7683 return true;
7684 }
7685
adv_busy(struct hci_dev * hdev)7686 static bool adv_busy(struct hci_dev *hdev)
7687 {
7688 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7689 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7690 pending_find(MGMT_OP_SET_LE, hdev) ||
7691 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7692 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7693 }
7694
/* HCI request completion handler for Add Advertising / Add Extended
 * Advertising Data. On success the pending instances are committed; on
 * failure they are torn down and an Advertising Removed event is emitted
 * for each. Finally the pending mgmt command (if any) is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Either legacy or extended flow may have queued this request */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	/* Commit or roll back every instance still marked pending. The
	 * _safe iterator is required because failed instances are removed
	 * from the list while walking it.
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer before removing its instance */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7748
/* Add Advertising mgmt command: register (or update) advertising instance
 * cp->instance with the supplied flags, timeout, duration and adv/scan-rsp
 * data, then schedule it for advertising when possible.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the lengths in the header */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the adv data in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7895
/* HCI request completion handler for Add Extended Advertising Parameters.
 * On success, reply with the selected TX power and the data-length limits;
 * on failure, tear down the instance (emitting Advertising Removed if it
 * was previously live) and report the error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	/* cmd may be NULL (already answered or never queued) */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
7952
/* Add Extended Advertising Parameters mgmt command: first half of the
 * two-call advertising registration. Creates the instance with the
 * requested (or default) parameters; adv and scan-rsp data are supplied
 * later via Add Extended Advertising Data.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv_instance;
	struct hci_request req;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   0, NULL, 0, NULL, timeout, duration,
				   tx_power, min_interval, max_interval);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		hci_req_init(&req, hdev);
		adv_instance = hci_find_adv_instance(hdev, cp->instance);

		/* Updating parameters of an active instance will return a
		 * Command Disallowed error, so we must first disable the
		 * instance if it is active.
		 */
		if (!adv_instance->pending)
			__hci_req_disable_ext_adv_instance(&req, cp->instance);

		__hci_req_setup_ext_adv_instance(&req, cp->instance);

		err = hci_req_run(&req, add_ext_adv_params_complete);

		/* NOTE(review): when hci_req_run() fails, cmd stays NULL and
		 * the branch below replaces the real error with -ENOMEM -
		 * confirm whether the pending cmd should be queued before
		 * running the request instead.
		 */
		if (!err)
			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
					       hdev, data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

	} else {
		/* No extended advertising: nothing to send to the controller
		 * yet, so answer immediately with default TX power and the
		 * data-length limits.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8080
/* Add Extended Advertising Data mgmt command: second half of the two-call
 * registration. Attaches adv and scan-rsp data to an instance previously
 * created by Add Extended Advertising Parameters and starts (or schedules)
 * advertising. On any validation failure the half-created instance is
 * removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by add_ext_adv_params first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	/* err may already hold a scheduling failure from the non-ext path */
	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8226
/* HCI request completion handler for Remove Advertising: answer the
 * pending mgmt command, if one is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		cp = cmd->param;
		rp.instance = cp->instance;

		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
8256
/* Remove Advertising mgmt command: remove advertising instance
 * cp->instance (or all instances when cp->instance is 0) and disable
 * advertising once no instance remains.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising. Reply with the opcode of the command actually being
	 * rejected (was wrongly MGMT_OP_SET_ADVERTISING, so the command
	 * status event did not match the request).
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A nonzero instance must actually exist; zero means "all" */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8342
/* Get Advertising Size Information mgmt command: report the adv-data and
 * scan-rsp space available for an instance given the requested flags,
 * without creating the instance.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
8381
/* Dispatch table for mgmt commands, indexed by opcode. Each entry lists the
 * handler, the fixed (or minimum, with HCI_MGMT_VAR_LEN) parameter size and
 * flags describing whether the command needs a controller index and whether
 * untrusted sockets may issue it.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8509
/* Announce a newly registered controller to management sockets.
 *
 * Sends the legacy (Unconfigured) Index Added event for HCI_PRIMARY
 * controllers and always follows up with the Extended Index Added
 * event carrying a type/bus descriptor.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only controllers are not exposed via mgmt at all */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			/* type 0x01 marks "unconfigured" in the
			 * extended event sent below
			 */
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* AMP controllers only show up in the extended event */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8541
/* Announce removal of a controller to management sockets.
 *
 * For HCI_PRIMARY controllers all still-pending mgmt commands are
 * completed with INVALID_INDEX first, then the legacy (Unconfigured)
 * Index Removed event is sent; the Extended Index Removed event always
 * follows with the same type encoding as mgmt_index_added().
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8576
8577 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)8578 static void restart_le_actions(struct hci_dev *hdev)
8579 {
8580 struct hci_conn_params *p;
8581
8582 list_for_each_entry(p, &hdev->le_conn_params, list) {
8583 /* Needed for AUTO_OFF case where might not "really"
8584 * have been powered off.
8585 */
8586 list_del_init(&p->action);
8587
8588 switch (p->auto_connect) {
8589 case HCI_AUTO_CONN_DIRECT:
8590 case HCI_AUTO_CONN_ALWAYS:
8591 list_add(&p->action, &hdev->pend_le_conns);
8592 break;
8593 case HCI_AUTO_CONN_REPORT:
8594 list_add(&p->action, &hdev->pend_le_reports);
8595 break;
8596 default:
8597 break;
8598 }
8599 }
8600 }
8601
/* Finish a power-on attempt: err is 0 on success.
 *
 * On success the LE auto-connect action lists are rebuilt and
 * background scanning refreshed; in all cases pending Set Powered
 * commands are answered and a New Settings event may be emitted.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* restart_le_actions() requires hdev->lock (held here) */
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8624
/* Handle controller power-off: answer pending commands, clear the
 * advertised class of device and emit a New Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 completes every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class change if it was actually non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8658
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8659 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8660 {
8661 struct mgmt_pending_cmd *cmd;
8662 u8 status;
8663
8664 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8665 if (!cmd)
8666 return;
8667
8668 if (err == -ERFKILL)
8669 status = MGMT_STATUS_RFKILLED;
8670 else
8671 status = MGMT_STATUS_FAILED;
8672
8673 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8674
8675 mgmt_pending_remove(cmd);
8676 }
8677
/* Notify user space about a new BR/EDR link key; store_hint tells it
 * whether the key should be kept persistently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.pin_len = key->pin_len;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.store_hint = persistent;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8694
mgmt_ltk_type(struct smp_ltk * ltk)8695 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8696 {
8697 switch (ltk->type) {
8698 case SMP_LTK:
8699 case SMP_LTK_RESPONDER:
8700 if (ltk->authenticated)
8701 return MGMT_LTK_AUTHENTICATED;
8702 return MGMT_LTK_UNAUTHENTICATED;
8703 case SMP_LTK_P256:
8704 if (ltk->authenticated)
8705 return MGMT_LTK_P256_AUTH;
8706 return MGMT_LTK_P256_UNAUTH;
8707 case SMP_LTK_P256_DEBUG:
8708 return MGMT_LTK_P256_DEBUG;
8709 }
8710
8711 return MGMT_LTK_UNAUTHENTICATED;
8712 }
8713
/* Notify user space about a new LE long term key.
 *
 * store_hint is forced to 0 for non-identity random addresses since
 * such keys cannot usefully be persisted.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key distributed to the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8756
/* Notify user space about a new identity resolving key together with
 * the resolvable private address it was received under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	ev.store_hint = persistent;

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8772
/* Notify user space about a new connection signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	ev.store_hint = (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
			 (csrk->bdaddr.b[5] & 0xc0) != 0xc0) ?
			0x00 : persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8802
/* Notify user space about updated LE connection parameters for a
 * device. Only identity addresses are reported; intervals, latency
 * and timeout go out in little-endian byte order.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.timeout = cpu_to_le16(timeout);
	ev.latency = cpu_to_le16(latency);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.min_interval = cpu_to_le16(min_interval);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8823
/* Emit a Device Connected event with either the LE advertising data
 * (for LE links) or a name/class EIR blob (for BR/EDR links) attached.
 *
 * NOTE(review): the 512-byte stack buffer is not explicitly bounds
 * checked against le_adv_data_len/name_len here; presumably both are
 * bounded well below it by the HCI layer — TODO confirm.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Locally initiated connections are flagged for user space */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
8864
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)8865 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8866 {
8867 struct sock **sk = data;
8868
8869 cmd->cmd_complete(cmd, 0);
8870
8871 *sk = cmd->sk;
8872 sock_hold(*sk);
8873
8874 mgmt_pending_remove(cmd);
8875 }
8876
/* Pending-command iterator: emit the Device Unpaired event for an
 * Unpair Device command and complete it with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	/* Must run before mgmt_pending_remove() frees cmd (and cp) */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8887
mgmt_powering_down(struct hci_dev * hdev)8888 bool mgmt_powering_down(struct hci_dev *hdev)
8889 {
8890 struct mgmt_pending_cmd *cmd;
8891 struct mgmt_mode *cp;
8892
8893 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8894 if (!cmd)
8895 return false;
8896
8897 cp = cmd->param;
8898 if (!cp->val)
8899 return true;
8900
8901 return false;
8902 }
8903
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands for this controller.
 *
 * When the last connection drops during a power-down, the deferred
 * power-off work is expedited instead of waiting for its timeout.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Connections never reported to user space need no event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back a referenced socket in sk so the
	 * event below can be skipped for the command originator.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8943
/* Complete a failed Disconnect command with the translated HCI status,
 * but only if the pending command actually targets this address.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	/* Unpair Device commands are flushed unconditionally */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different address or address type */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8969
/* Emit a Connect Failed event with the HCI status translated to a
 * mgmt status code.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Expedite the pending power-off work */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8989
/* Ask user space for a PIN code; secure indicates whether a 16-digit
 * PIN is required. PIN pairing is BR/EDR only.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9000
/* Complete a pending PIN Code Reply command with the given HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9013
/* Complete a pending PIN Code Negative Reply command with the given
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9026
/* Ask user space to confirm a numeric-comparison value; confirm_hint
 * distinguishes a plain "confirm" from a compare-and-confirm request.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9043
/* Ask user space to supply a passkey for pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9057
/* Common completion path for the four user pairing response commands:
 * complete the pending command identified by opcode with the mgmt
 * translation of the HCI status. Returns -ENOENT if nothing pends.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
	return 0;
}
9073
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_REPLY);
}
9080
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9088
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_REPLY);
}
9095
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9103
/* Show user space the passkey to display; entered counts keypresses
 * reported so far by the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9119
/* Emit an Authentication Failed event and complete any pending pairing
 * command for this connection with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event for the pairing originator; it gets the
	 * command completion below instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9140
/* Finish a Set Link Security operation: on failure all pending
 * commands get a status response; on success the HCI_LINK_SECURITY
 * flag is synced with the controller's HCI_AUTH state and a New
 * Settings event is emitted only if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test-and-set/clear report whether the flag flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9167
/* Queue an HCI Write EIR command that wipes the extended inquiry
 * response, and clear the cached copy in hdev->eir. No-op if the
 * controller lacks extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	/* Keep the cached EIR in sync with what we send */
	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
9182
/* Finish a Set SSP operation.
 *
 * On failure the SSP (and, with it, HS) flags are rolled back and all
 * pending Set SSP commands get a status response. On success the
 * flags are synced, pending commands are answered, a New Settings
 * event goes out on change, and the EIR is rewritten (or cleared)
 * to reflect the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back a failed enable; HS depends on SSP so it
		 * is cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP also disables HS; "changed" must be
		 * true if either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9235
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9236 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9237 {
9238 struct cmd_lookup *match = data;
9239
9240 if (match->sk == NULL) {
9241 match->sk = cmd->sk;
9242 sock_hold(match->sk);
9243 }
9244 }
9245
/* Finish a class-of-device update triggered by Set Device Class,
 * Add UUID or Remove UUID. On success a Class Of Device Changed
 * event (3-byte class) is broadcast, skipping the originator socket
 * captured by sk_lookup().
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the change */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9264
/* Finish a local name update and broadcast a Local Name Changed event.
 *
 * If no Set Local Name command is pending, the change originated from
 * the HCI layer (e.g. during init), so the cached name is updated here
 * and the event is suppressed while powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* HCI-originated change: sync the cached name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originator socket; it gets the command response */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9292
/* Return true if the 128-bit uuid appears in the uuids table of
 * uuid_count entries.
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++)
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;

	return false;
}
9304
/* Walk an EIR/AD blob (length/type/data triplets) and return true if
 * any advertised UUID matches one in the uuids filter table. 16- and
 * 32-bit UUIDs are expanded onto the Bluetooth base UUID (bytes 12-15,
 * little-endian in the EIR data) before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Length 0 terminates the significant part of the data */
		if (field_len == 0)
			break;

		/* Bail out if the field would overrun the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* i + 3 <= field_len keeps both UUID bytes in
			 * bounds; data starts at eir[2].
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field's length byte plus payload */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9359
/* Schedule a delayed LE scan restart so strict-duplicate-filter
 * controllers report fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only trigger after the scan
	 * window has already ended anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9374
/* Apply the active service-discovery filter (RSSI threshold and/or
 * UUID list) to a found device; returns true if the result should be
 * reported to user space.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9419
/* Report a discovered device to user space as a Device Found event,
 * after applying discovery-state, filter and limited-discovery checks.
 * The EIR payload is the advertising data, optionally a synthesized
 * class-of-device field, followed by any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the second CoD byte */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: limited flag in the advertised AD Flags */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Synthesize a CoD field only if one is not already present */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9504
/* Emit a Device Found event that carries the remote device's complete
 * name as an EIR_NAME_COMPLETE structure (e.g. once a remote name
 * request has resolved).
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Event header plus the name TLV: type byte + length byte + name. */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	memset(buf, 0, sizeof(buf));
	ev = (struct mgmt_ev_device_found *)buf;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* Pack the name into the event's EIR area as a complete-name field. */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
9527
/* Notify management clients that discovery of the given type has
 * started (discovering != 0) or stopped.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
9540
/* Tell management clients the controller is entering suspend, with the
 * suspend state it ended up in.
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
9548
/* Tell management clients the controller resumed, including why it
 * woke up and — when the wake was triggered by a remote device — that
 * device's address.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev, 0, sizeof(ev));
	ev.wake_reason = reason;

	/* Without a triggering device the address field stays all-zero. */
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
9564
/* Registration record for the mgmt control channel: routes commands on
 * HCI_CHANNEL_CONTROL to the mgmt_handlers table (defined earlier in
 * this file) and runs mgmt_init_hdev when a controller is first used.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9571
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error from
 * hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9576
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9581