/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}
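
/* Example mappings (illustrative): a positive err is treated as an HCI
 * status code, so HCI "Page Timeout" (0x04) resolves to
 * MGMT_STATUS_CONNECT_FAILED via mgmt_status_table, while a negative
 * errno such as -EBUSY resolves to MGMT_STATUS_BUSY via
 * mgmt_errno_status(). Anything unmapped falls back to
 * MGMT_STATUS_FAILED.
 */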
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
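
/* mgmt exposes BDADDR_LE_PUBLIC/BDADDR_LE_RANDOM address types on the
 * wire, while the core connection code works with ADDR_LE_DEV_PUBLIC
 * and ADDR_LE_DEV_RANDOM; this helper translates between the two.
 */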
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
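
/* The Read Commands reply is a fixed header followed by a packed
 * little-endian opcode list: num_commands command opcodes first, then
 * num_events event opcodes. Trusted sockets see the full tables above;
 * untrusted sockets only see the read-only subsets.
 */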
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
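
/* The index listing helpers walk hci_dev_list twice under the read
 * lock: a first pass sizes the reply buffer, a second pass fills it in.
 * The allocation uses GFP_ATOMIC because the list lock is held, and the
 * count is recomputed in the second pass since additional entries
 * (HCI_SETUP, HCI_CONFIG, HCI_USER_CHANNEL, raw-only) are skipped there.
 */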
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
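
/* Example (illustrative): a basic-rate-only controller whose LMP
 * features advertise 3-slot and 5-slot packets reports
 * MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | MGMT_PHY_BR_1M_5SLOT;
 * LE support always adds at least MGMT_PHY_LE_1M_TX and
 * MGMT_PHY_LE_1M_RX, with the 2M and Coded bits gated on the
 * controller's LE feature page.
 */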
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
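
/* Note the inverted sense of the EDR bits in hdev->pkt_type: HCI_2DH1
 * and friends mean "shall not be used", so an EDR packet type counts as
 * selected when its bit is clear, while the basic-rate DM/DH bits are
 * set when the corresponding packet types are enabled.
 */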
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
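
/* Mesh sends are serialized behind the HCI_MESH_SENDING flag; the
 * helpers below tear down one transmission and, via mesh_next(), start
 * the next queued mgmt_mesh_tx if there is one.
 */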
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
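
/* Build the EIR-formatted blob used by the extended info command and
 * event: each element is a length/type/value triplet, here class of
 * device (when BR/EDR is enabled), appearance (when LE is enabled) and
 * the complete and short local names.
 */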
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
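
/* Helpers for flushing pending commands of a given opcode. settings_rsp
 * answers each one with the current settings and stashes a reference to
 * the first socket in the cmd_lookup so callers can later skip it when
 * broadcasting New Settings; cmd_status_rsp simply fails the command
 * with the stored status.
 */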
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
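
/* NOT_SUPPORTED means the controller lacks the transport altogether,
 * while REJECTED means the radio supports it but the host currently has
 * it disabled (e.g. BR/EDR or LE switched off via mgmt).
 */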
mgmt_bredr_support(struct hci_dev * hdev)1493 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 {
1495 if (!lmp_bredr_capable(hdev))
1496 return MGMT_STATUS_NOT_SUPPORTED;
1497 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1498 return MGMT_STATUS_REJECTED;
1499 else
1500 return MGMT_STATUS_SUCCESS;
1501 }
1502
mgmt_le_support(struct hci_dev * hdev)1503 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 {
1505 if (!lmp_le_capable(hdev))
1506 return MGMT_STATUS_NOT_SUPPORTED;
1507 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1508 return MGMT_STATUS_REJECTED;
1509 else
1510 return MGMT_STATUS_SUCCESS;
1511 }
1512
mgmt_set_discoverable_complete(struct hci_dev * hdev,void * data,int err)1513 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1514 int err)
1515 {
1516 struct mgmt_pending_cmd *cmd = data;
1517
1518 bt_dev_dbg(hdev, "err %d", err);
1519
1520 /* Make sure cmd still outstanding. */
1521 if (err == -ECANCELED ||
1522 cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1523 return;
1524
1525 hci_dev_lock(hdev);
1526
1527 if (err) {
1528 u8 mgmt_err = mgmt_status(err);
1529 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1530 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1531 goto done;
1532 }
1533
1534 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1535 hdev->discov_timeout > 0) {
1536 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1537 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1538 }
1539
1540 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1541 new_settings(hdev, cmd->sk);
1542
1543 done:
1544 mgmt_pending_remove(cmd);
1545 hci_dev_unlock(hdev);
1546 }
1547
set_discoverable_sync(struct hci_dev * hdev,void * data)1548 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1549 {
1550 BT_DBG("%s", hdev->name);
1551
1552 return hci_update_discoverable_sync(hdev);
1553 }
1554
set_discoverable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1555 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 u16 len)
1557 {
1558 struct mgmt_cp_set_discoverable *cp = data;
1559 struct mgmt_pending_cmd *cmd;
1560 u16 timeout;
1561 int err;
1562
1563 bt_dev_dbg(hdev, "sock %p", sk);
1564
1565 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1566 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1568 MGMT_STATUS_REJECTED);
1569
1570 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_INVALID_PARAMS);
1573
1574 timeout = __le16_to_cpu(cp->timeout);
1575
1576 /* Disabling discoverable requires that no timeout is set,
1577 * and enabling limited discoverable requires a timeout.
1578 */
1579 if ((cp->val == 0x00 && timeout > 0) ||
1580 (cp->val == 0x02 && timeout == 0))
1581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 MGMT_STATUS_INVALID_PARAMS);
1583
1584 hci_dev_lock(hdev);
1585
1586 if (!hdev_is_powered(hdev) && timeout > 0) {
1587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1588 MGMT_STATUS_NOT_POWERED);
1589 goto failed;
1590 }
1591
1592 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1593 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_BUSY);
1596 goto failed;
1597 }
1598
1599 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1601 MGMT_STATUS_REJECTED);
1602 goto failed;
1603 }
1604
1605 if (hdev->advertising_paused) {
1606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1607 MGMT_STATUS_BUSY);
1608 goto failed;
1609 }
1610
1611 if (!hdev_is_powered(hdev)) {
1612 bool changed = false;
1613
1614 /* Setting limited discoverable when powered off is
1615 * not a valid operation since it requires a timeout
1616 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1617 */
1618 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1619 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1620 changed = true;
1621 }
1622
1623 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1624 if (err < 0)
1625 goto failed;
1626
1627 if (changed)
1628 err = new_settings(hdev, sk);
1629
1630 goto failed;
1631 }
1632
1633 /* If the current mode is the same, then just update the timeout
1634 * value with the new value. And if only the timeout gets updated,
1635 * then no need for any HCI transactions.
1636 */
1637 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1638 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1639 HCI_LIMITED_DISCOVERABLE)) {
1640 cancel_delayed_work(&hdev->discov_off);
1641 hdev->discov_timeout = timeout;
1642
1643 if (cp->val && hdev->discov_timeout > 0) {
1644 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1645 queue_delayed_work(hdev->req_workqueue,
1646 &hdev->discov_off, to);
1647 }
1648
1649 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 goto failed;
1651 }
1652
1653 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1654 if (!cmd) {
1655 err = -ENOMEM;
1656 goto failed;
1657 }
1658
1659 /* Cancel any potential discoverable timeout that might be
1660 * still active and store new timeout value. The arming of
1661 * the timeout happens in the complete handler.
1662 */
1663 cancel_delayed_work(&hdev->discov_off);
1664 hdev->discov_timeout = timeout;
1665
1666 if (cp->val)
1667 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1668 else
1669 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1670
1671 /* Limited discoverable mode */
1672 if (cp->val == 0x02)
1673 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 else
1675 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1676
1677 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1678 mgmt_set_discoverable_complete);
1679
1680 if (err < 0)
1681 mgmt_pending_remove(cmd);
1682
1683 failed:
1684 hci_dev_unlock(hdev);
1685 return err;
1686 }
1687
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1688 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 int err)
1690 {
1691 struct mgmt_pending_cmd *cmd = data;
1692
1693 bt_dev_dbg(hdev, "err %d", err);
1694
1695 /* Make sure cmd still outstanding. */
1696 if (err == -ECANCELED ||
1697 cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1698 return;
1699
1700 hci_dev_lock(hdev);
1701
1702 if (err) {
1703 u8 mgmt_err = mgmt_status(err);
1704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 goto done;
1706 }
1707
1708 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1709 new_settings(hdev, cmd->sk);
1710
1711 done:
1712 mgmt_pending_remove(cmd);
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
set_connectable_update_settings(struct hci_dev * hdev,struct sock * sk,u8 val)1717 static int set_connectable_update_settings(struct hci_dev *hdev,
1718 struct sock *sk, u8 val)
1719 {
1720 bool changed = false;
1721 int err;
1722
1723 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1724 changed = true;
1725
1726 if (val) {
1727 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1728 } else {
1729 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1730 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 }
1732
1733 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1734 if (err < 0)
1735 return err;
1736
1737 if (changed) {
1738 hci_update_scan(hdev);
1739 hci_update_passive_scan(hdev);
1740 return new_settings(hdev, sk);
1741 }
1742
1743 return 0;
1744 }
1745
set_connectable_sync(struct hci_dev * hdev,void * data)1746 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1747 {
1748 BT_DBG("%s", hdev->name);
1749
1750 return hci_update_connectable_sync(hdev);
1751 }
1752
set_connectable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1753 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 u16 len)
1755 {
1756 struct mgmt_mode *cp = data;
1757 struct mgmt_pending_cmd *cmd;
1758 int err;
1759
1760 bt_dev_dbg(hdev, "sock %p", sk);
1761
1762 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1763 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1765 MGMT_STATUS_REJECTED);
1766
1767 if (cp->val != 0x00 && cp->val != 0x01)
1768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 MGMT_STATUS_INVALID_PARAMS);
1770
1771 hci_dev_lock(hdev);
1772
1773 if (!hdev_is_powered(hdev)) {
1774 err = set_connectable_update_settings(hdev, sk, cp->val);
1775 goto failed;
1776 }
1777
1778 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1779 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1781 MGMT_STATUS_BUSY);
1782 goto failed;
1783 }
1784
1785 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1786 if (!cmd) {
1787 err = -ENOMEM;
1788 goto failed;
1789 }
1790
1791 if (cp->val) {
1792 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1793 } else {
1794 if (hdev->discov_timeout > 0)
1795 cancel_delayed_work(&hdev->discov_off);
1796
1797 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1798 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1799 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 }
1801
1802 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1803 mgmt_set_connectable_complete);
1804
1805 if (err < 0)
1806 mgmt_pending_remove(cmd);
1807
1808 failed:
1809 hci_dev_unlock(hdev);
1810 return err;
1811 }
1812
set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1813 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 u16 len)
1815 {
1816 struct mgmt_mode *cp = data;
1817 bool changed;
1818 int err;
1819
1820 bt_dev_dbg(hdev, "sock %p", sk);
1821
1822 if (cp->val != 0x00 && cp->val != 0x01)
1823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1824 MGMT_STATUS_INVALID_PARAMS);
1825
1826 hci_dev_lock(hdev);
1827
1828 if (cp->val)
1829 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1830 else
1831 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1832
1833 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1834 if (err < 0)
1835 goto unlock;
1836
1837 if (changed) {
1838 /* In limited privacy mode the change of bondable mode
1839 * may affect the local advertising address.
1840 */
1841 hci_update_discoverable(hdev);
1842
1843 err = new_settings(hdev, sk);
1844 }
1845
1846 unlock:
1847 hci_dev_unlock(hdev);
1848 return err;
1849 }
1850
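/* Handler for MGMT_OP_SET_LINK_SECURITY. When powered, the setting is
 * translated into an HCI_OP_WRITE_AUTH_ENABLE command; when powered off
 * only the HCI_LINK_SECURITY flag is updated.
 */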
1851 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 u16 len)
1853 {
1854 struct mgmt_mode *cp = data;
1855 struct mgmt_pending_cmd *cmd;
1856 u8 val, status;
1857 int err;
1858
1859 bt_dev_dbg(hdev, "sock %p", sk);
1860
1861 status = mgmt_bredr_support(hdev);
1862 if (status)
1863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 status);
1865
1866 if (cp->val != 0x00 && cp->val != 0x01)
1867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1868 MGMT_STATUS_INVALID_PARAMS);
1869
1870 hci_dev_lock(hdev);
1871
1872 if (!hdev_is_powered(hdev)) {
1873 bool changed = false;
1874
1875 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1876 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1877 changed = true;
1878 }
1879
1880 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1881 if (err < 0)
1882 goto failed;
1883
1884 if (changed)
1885 err = new_settings(hdev, sk);
1886
1887 goto failed;
1888 }
1889
1890 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1891 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1892 MGMT_STATUS_BUSY);
1893 goto failed;
1894 }
1895
1896 val = !!cp->val;
1897
1898 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1899 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1900 goto failed;
1901 }
1902
1903 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1904 if (!cmd) {
1905 err = -ENOMEM;
1906 goto failed;
1907 }
1908
1909 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1910 if (err < 0) {
1911 mgmt_pending_remove(cmd);
1912 goto failed;
1913 }
1914
1915 failed:
1916 hci_dev_unlock(hdev);
1917 return err;
1918 }
1919
1920 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1921 {
1922 struct cmd_lookup match = { NULL, hdev };
1923 struct mgmt_pending_cmd *cmd = data;
1924 struct mgmt_mode *cp = cmd->param;
1925 u8 enable = cp->val;
1926 bool changed;
1927
1928 /* Make sure the cmd is still outstanding. */
1929 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1930 return;
1931
1932 if (err) {
1933 u8 mgmt_err = mgmt_status(err);
1934
1935 if (enable && hci_dev_test_and_clear_flag(hdev,
1936 HCI_SSP_ENABLED)) {
1937 new_settings(hdev, NULL);
1938 }
1939
1940 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1941 &mgmt_err);
1942 return;
1943 }
1944
1945 if (enable) {
1946 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 } else {
1948 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1949 }
1950
1951 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1952
1953 if (changed)
1954 new_settings(hdev, match.sk);
1955
1956 if (match.sk)
1957 sock_put(match.sk);
1958
1959 hci_update_eir_sync(hdev);
1960 }
1961
1962 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1963 {
1964 struct mgmt_pending_cmd *cmd = data;
1965 struct mgmt_mode *cp = cmd->param;
1966 bool changed = false;
1967 int err;
1968
1969 if (cp->val)
1970 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1971
1972 err = hci_write_ssp_mode_sync(hdev, cp->val);
1973
1974 if (!err && changed)
1975 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1976
1977 return err;
1978 }
1979
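/* Handler for MGMT_OP_SET_SSP. Requires an SSP capable BR/EDR
 * controller; when powered, the mode is written by set_ssp_sync() and
 * the result is processed in set_ssp_complete().
 */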
1980 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1981 {
1982 struct mgmt_mode *cp = data;
1983 struct mgmt_pending_cmd *cmd;
1984 u8 status;
1985 int err;
1986
1987 bt_dev_dbg(hdev, "sock %p", sk);
1988
1989 status = mgmt_bredr_support(hdev);
1990 if (status)
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1992
1993 if (!lmp_ssp_capable(hdev))
1994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1995 MGMT_STATUS_NOT_SUPPORTED);
1996
1997 if (cp->val != 0x00 && cp->val != 0x01)
1998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1999 MGMT_STATUS_INVALID_PARAMS);
2000
2001 hci_dev_lock(hdev);
2002
2003 if (!hdev_is_powered(hdev)) {
2004 bool changed;
2005
2006 if (cp->val) {
2007 changed = !hci_dev_test_and_set_flag(hdev,
2008 HCI_SSP_ENABLED);
2009 } else {
2010 changed = hci_dev_test_and_clear_flag(hdev,
2011 HCI_SSP_ENABLED);
2012 }
2013
2014 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 if (err < 0)
2016 goto failed;
2017
2018 if (changed)
2019 err = new_settings(hdev, sk);
2020
2021 goto failed;
2022 }
2023
2024 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 MGMT_STATUS_BUSY);
2027 goto failed;
2028 }
2029
2030 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2031 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2032 goto failed;
2033 }
2034
2035 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2036 if (!cmd)
2037 err = -ENOMEM;
2038 else
2039 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2040 set_ssp_complete);
2041
2042 if (err < 0) {
2043 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2044 MGMT_STATUS_FAILED);
2045
2046 if (cmd)
2047 mgmt_pending_remove(cmd);
2048 }
2049
2050 failed:
2051 hci_dev_unlock(hdev);
2052 return err;
2053 }
2054
2055 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2056 {
2057 bt_dev_dbg(hdev, "sock %p", sk);
2058
2059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2060 MGMT_STATUS_NOT_SUPPORTED);
2061 }
2062
2063 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2064 {
2065 struct cmd_lookup match = { NULL, hdev };
2066 u8 status = mgmt_status(err);
2067
2068 bt_dev_dbg(hdev, "err %d", err);
2069
2070 if (status) {
2071 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2072 &status);
2073 return;
2074 }
2075
2076 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2077
2078 new_settings(hdev, match.sk);
2079
2080 if (match.sk)
2081 sock_put(match.sk);
2082 }
2083
2084 static int set_le_sync(struct hci_dev *hdev, void *data)
2085 {
2086 struct mgmt_pending_cmd *cmd = data;
2087 struct mgmt_mode *cp = cmd->param;
2088 u8 val = !!cp->val;
2089 int err;
2090
2091 if (!val) {
2092 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2093
2094 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2095 hci_disable_advertising_sync(hdev);
2096
2097 if (ext_adv_capable(hdev))
2098 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2099 } else {
2100 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2101 }
2102
2103 err = hci_write_le_host_supported_sync(hdev, val, 0);
2104
2105 /* Make sure the controller has a good default for
2106 * advertising data. Restrict the update to when LE
2107 * has actually been enabled. During power on, the
2108 * update in powered_update_hci will take care of it.
2109 */
2110 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2111 if (ext_adv_capable(hdev)) {
2112 int status;
2113
2114 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2115 if (!status)
2116 hci_update_scan_rsp_data_sync(hdev, 0x00);
2117 } else {
2118 hci_update_adv_data_sync(hdev, 0x00);
2119 hci_update_scan_rsp_data_sync(hdev, 0x00);
2120 }
2121
2122 hci_update_passive_scan(hdev);
2123 }
2124
2125 return err;
2126 }
2127
2128 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2129 {
2130 struct mgmt_pending_cmd *cmd = data;
2131 u8 status = mgmt_status(err);
2132 struct sock *sk = cmd->sk;
2133
2134 if (status) {
2135 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2136 cmd_status_rsp, &status);
2137 return;
2138 }
2139
2140 mgmt_pending_remove(cmd);
2141 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2142 }
2143
2144 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2145 {
2146 struct mgmt_pending_cmd *cmd = data;
2147 struct mgmt_cp_set_mesh *cp = cmd->param;
2148 size_t len = cmd->param_len;
2149
2150 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2151
2152 if (cp->enable)
2153 hci_dev_set_flag(hdev, HCI_MESH);
2154 else
2155 hci_dev_clear_flag(hdev, HCI_MESH);
2156
2157 hdev->le_scan_interval = __le16_to_cpu(cp->period);
2158 hdev->le_scan_window = __le16_to_cpu(cp->window);
2159
2160 len -= sizeof(*cp);
2161
2162 /* If filters don't fit, forward all adv pkts */
2163 if (len <= sizeof(hdev->mesh_ad_types))
2164 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2165
2166 hci_update_passive_scan_sync(hdev);
2167 return 0;
2168 }
2169
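/* Handler for MGMT_OP_SET_MESH_RECEIVER. Validates the scan period and
 * window (the same ranges as set_scan_params()) before queueing
 * set_mesh_sync(), which updates the passive scan parameters and the
 * list of mesh AD types to track.
 */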
2170 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2171 {
2172 struct mgmt_cp_set_mesh *cp = data;
2173 struct mgmt_pending_cmd *cmd;
2174 __u16 period, window;
2175 int err = 0;
2176
2177 bt_dev_dbg(hdev, "sock %p", sk);
2178
2179 if (!lmp_le_capable(hdev) ||
2180 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2182 MGMT_STATUS_NOT_SUPPORTED);
2183
2184 if (cp->enable != 0x00 && cp->enable != 0x01)
2185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 MGMT_STATUS_INVALID_PARAMS);
2187
2188 /* Keep allowed ranges in sync with set_scan_params() */
2189 period = __le16_to_cpu(cp->period);
2190
2191 if (period < 0x0004 || period > 0x4000)
2192 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2193 MGMT_STATUS_INVALID_PARAMS);
2194
2195 window = __le16_to_cpu(cp->window);
2196
2197 if (window < 0x0004 || window > 0x4000)
2198 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2199 MGMT_STATUS_INVALID_PARAMS);
2200
2201 if (window > period)
2202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2203 MGMT_STATUS_INVALID_PARAMS);
2204
2205 hci_dev_lock(hdev);
2206
2207 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2208 if (!cmd)
2209 err = -ENOMEM;
2210 else
2211 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2212 set_mesh_complete);
2213
2214 if (err < 0) {
2215 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2216 MGMT_STATUS_FAILED);
2217
2218 if (cmd)
2219 mgmt_pending_remove(cmd);
2220 }
2221
2222 hci_dev_unlock(hdev);
2223 return err;
2224 }
2225
2226 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2227 {
2228 struct mgmt_mesh_tx *mesh_tx = data;
2229 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2230 unsigned long mesh_send_interval;
2231 u8 mgmt_err = mgmt_status(err);
2232
2233 /* Report any errors here, but don't report completion */
2234
2235 if (mgmt_err) {
2236 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2237 /* Send Complete Error Code for handle */
2238 mesh_send_complete(hdev, mesh_tx, false);
2239 return;
2240 }
2241
2242 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2243 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2244 mesh_send_interval);
2245 }
2246
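/* Queued by mesh_send(): transmit one mesh packet by adding a temporary
 * advertising instance (one past le_num_of_adv_sets) that carries the
 * packet as advertising data, and scheduling it.
 */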
2247 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2248 {
2249 struct mgmt_mesh_tx *mesh_tx = data;
2250 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2251 struct adv_info *adv, *next_instance;
2252 u8 instance = hdev->le_num_of_adv_sets + 1;
2253 u16 timeout, duration;
2254 int err = 0;
2255
2256 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2257 return MGMT_STATUS_BUSY;
2258
2259 timeout = 1000;
2260 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2261 adv = hci_add_adv_instance(hdev, instance, 0,
2262 send->adv_data_len, send->adv_data,
2263 0, NULL,
2264 timeout, duration,
2265 HCI_ADV_TX_POWER_NO_PREFERENCE,
2266 hdev->le_adv_min_interval,
2267 hdev->le_adv_max_interval,
2268 mesh_tx->handle);
2269
2270 if (!IS_ERR(adv))
2271 mesh_tx->instance = instance;
2272 else
2273 err = PTR_ERR(adv);
2274
2275 if (hdev->cur_adv_instance == instance) {
2276 /* If the currently advertised instance is being changed then
2277 * cancel the current advertising and schedule the next
2278 * instance. If there is only one instance then the overridden
2279 * advertising data will be visible right away.
2280 */
2281 cancel_adv_timeout(hdev);
2282
2283 next_instance = hci_get_next_instance(hdev, instance);
2284 if (next_instance)
2285 instance = next_instance->instance;
2286 else
2287 instance = 0;
2288 } else if (hdev->adv_instance_timeout) {
2289 /* Immediately advertise the new instance if no other is active, or
2290 * let it go out naturally from the queue if one is already running
2291 */
2292 instance = 0;
2293 }
2294
2295 if (instance)
2296 return hci_schedule_adv_instance_sync(hdev, instance, true);
2297
2298 return err;
2299 }
2300
2301 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2302 {
2303 struct mgmt_rp_mesh_read_features *rp = data;
2304
2305 if (rp->used_handles >= rp->max_handles)
2306 return;
2307
2308 rp->handles[rp->used_handles++] = mesh_tx->handle;
2309 }
2310
2311 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2312 void *data, u16 len)
2313 {
2314 struct mgmt_rp_mesh_read_features rp;
2315
2316 if (!lmp_le_capable(hdev) ||
2317 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2319 MGMT_STATUS_NOT_SUPPORTED);
2320
2321 memset(&rp, 0, sizeof(rp));
2322 rp.index = cpu_to_le16(hdev->id);
2323 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2324 rp.max_handles = MESH_HANDLES_MAX;
2325
2326 hci_dev_lock(hdev);
2327
2328 if (rp.max_handles)
2329 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2330
2331 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2332 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2333
2334 hci_dev_unlock(hdev);
2335 return 0;
2336 }
2337
2338 static int send_cancel(struct hci_dev *hdev, void *data)
2339 {
2340 struct mgmt_pending_cmd *cmd = data;
2341 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2342 struct mgmt_mesh_tx *mesh_tx;
2343
2344 if (!cancel->handle) {
2345 do {
2346 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2347
2348 if (mesh_tx)
2349 mesh_send_complete(hdev, mesh_tx, false);
2350 } while (mesh_tx);
2351 } else {
2352 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2353
2354 if (mesh_tx && mesh_tx->sk == cmd->sk)
2355 mesh_send_complete(hdev, mesh_tx, false);
2356 }
2357
2358 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 0, NULL, 0);
2360 mgmt_pending_free(cmd);
2361
2362 return 0;
2363 }
2364
2365 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2366 void *data, u16 len)
2367 {
2368 struct mgmt_pending_cmd *cmd;
2369 int err;
2370
2371 if (!lmp_le_capable(hdev) ||
2372 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2373 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2374 MGMT_STATUS_NOT_SUPPORTED);
2375
2376 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2378 MGMT_STATUS_REJECTED);
2379
2380 hci_dev_lock(hdev);
2381 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2382 if (!cmd)
2383 err = -ENOMEM;
2384 else
2385 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2386
2387 if (err < 0) {
2388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2389 MGMT_STATUS_FAILED);
2390
2391 if (cmd)
2392 mgmt_pending_free(cmd);
2393 }
2394
2395 hci_dev_unlock(hdev);
2396 return err;
2397 }
2398
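/* Handler for MGMT_OP_MESH_SEND. The payload must fit into a legacy
 * advertising PDU (at most 31 bytes beyond the fixed parameters). On
 * success the transmission handle is returned to the caller and the
 * actual transmit is driven by mesh_send_sync().
 */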
2399 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2400 {
2401 struct mgmt_mesh_tx *mesh_tx;
2402 struct mgmt_cp_mesh_send *send = data;
2403 struct mgmt_rp_mesh_read_features rp;
2404 bool sending;
2405 int err = 0;
2406
2407 if (!lmp_le_capable(hdev) ||
2408 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2410 MGMT_STATUS_NOT_SUPPORTED);
2411 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2412 len <= MGMT_MESH_SEND_SIZE ||
2413 len > (MGMT_MESH_SEND_SIZE + 31))
2414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2415 MGMT_STATUS_REJECTED);
2416
2417 hci_dev_lock(hdev);
2418
2419 memset(&rp, 0, sizeof(rp));
2420 rp.max_handles = MESH_HANDLES_MAX;
2421
2422 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2423
2424 if (rp.max_handles <= rp.used_handles) {
2425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2426 MGMT_STATUS_BUSY);
2427 goto done;
2428 }
2429
2430 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2431 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2432
2433 if (!mesh_tx)
2434 err = -ENOMEM;
2435 else if (!sending)
2436 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2437 mesh_send_start_complete);
2438
2439 if (err < 0) {
2440 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442 MGMT_STATUS_FAILED);
2443
2444 if (mesh_tx) {
2445 if (sending)
2446 mgmt_mesh_remove(mesh_tx);
2447 }
2448 } else {
2449 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2450
2451 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2452 &mesh_tx->handle, 1);
2453 }
2454
2455 done:
2456 hci_dev_unlock(hdev);
2457 return err;
2458 }
2459
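/* Handler for MGMT_OP_SET_LE. Disabling LE is rejected on LE-only
 * configurations; when powered, set_le_sync() writes the LE Host
 * Supported setting and refreshes the advertising state.
 */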
2460 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2461 {
2462 struct mgmt_mode *cp = data;
2463 struct mgmt_pending_cmd *cmd;
2464 int err;
2465 u8 val, enabled;
2466
2467 bt_dev_dbg(hdev, "sock %p", sk);
2468
2469 if (!lmp_le_capable(hdev))
2470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2471 MGMT_STATUS_NOT_SUPPORTED);
2472
2473 if (cp->val != 0x00 && cp->val != 0x01)
2474 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2475 MGMT_STATUS_INVALID_PARAMS);
2476
2477 /* Bluetooth single-mode LE-only controllers, and dual-mode
2478 * controllers configured as LE-only devices, do not allow
2479 * switching LE off. These have either LE enabled explicitly
2480 * or BR/EDR previously switched off.
2481 *
2482 * When trying to enable LE while it is already enabled,
2483 * gracefully send a positive response. Trying to disable it,
2484 * however, results in rejection.
2485 */
2486 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2487 if (cp->val == 0x01)
2488 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2489
2490 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2491 MGMT_STATUS_REJECTED);
2492 }
2493
2494 hci_dev_lock(hdev);
2495
2496 val = !!cp->val;
2497 enabled = lmp_host_le_capable(hdev);
2498
2499 if (!hdev_is_powered(hdev) || val == enabled) {
2500 bool changed = false;
2501
2502 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2503 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2504 changed = true;
2505 }
2506
2507 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2508 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2509 changed = true;
2510 }
2511
2512 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2513 if (err < 0)
2514 goto unlock;
2515
2516 if (changed)
2517 err = new_settings(hdev, sk);
2518
2519 goto unlock;
2520 }
2521
2522 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2523 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2524 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2525 MGMT_STATUS_BUSY);
2526 goto unlock;
2527 }
2528
2529 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2530 if (!cmd)
2531 err = -ENOMEM;
2532 else
2533 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2534 set_le_complete);
2535
2536 if (err < 0) {
2537 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2538 MGMT_STATUS_FAILED);
2539
2540 if (cmd)
2541 mgmt_pending_remove(cmd);
2542 }
2543
2544 unlock:
2545 hci_dev_unlock(hdev);
2546 return err;
2547 }
2548
2549 /* This is a helper function to test for pending mgmt commands that can
2550 * cause CoD or EIR HCI commands. We can only allow one such pending
2551 * mgmt command at a time since otherwise we cannot easily track what
2552 * the current values are, will be, and based on that calculate if a new
2553 * HCI command needs to be sent and if yes with what value.
2554 */
2555 static bool pending_eir_or_class(struct hci_dev *hdev)
2556 {
2557 struct mgmt_pending_cmd *cmd;
2558
2559 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2560 switch (cmd->opcode) {
2561 case MGMT_OP_ADD_UUID:
2562 case MGMT_OP_REMOVE_UUID:
2563 case MGMT_OP_SET_DEV_CLASS:
2564 case MGMT_OP_SET_POWERED:
2565 return true;
2566 }
2567 }
2568
2569 return false;
2570 }
2571
2572 static const u8 bluetooth_base_uuid[] = {
2573 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2574 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2575 };
2576
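/* bluetooth_base_uuid above is the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB) in little-endian byte order.
 * A UUID whose low 96 bits match the base is a shortened UUID: if the
 * remaining 32-bit value exceeds 0xffff it needs 32 bits, otherwise it
 * fits into 16 bits.
 */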
2577 static u8 get_uuid_size(const u8 *uuid)
2578 {
2579 u32 val;
2580
2581 if (memcmp(uuid, bluetooth_base_uuid, 12))
2582 return 128;
2583
2584 val = get_unaligned_le32(&uuid[12]);
2585 if (val > 0xffff)
2586 return 32;
2587
2588 return 16;
2589 }
2590
2591 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2592 {
2593 struct mgmt_pending_cmd *cmd = data;
2594
2595 bt_dev_dbg(hdev, "err %d", err);
2596
2597 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2598 mgmt_status(err), hdev->dev_class, 3);
2599
2600 mgmt_pending_free(cmd);
2601 }
2602
2603 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2604 {
2605 int err;
2606
2607 err = hci_update_class_sync(hdev);
2608 if (err)
2609 return err;
2610
2611 return hci_update_eir_sync(hdev);
2612 }
2613
2614 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2615 {
2616 struct mgmt_cp_add_uuid *cp = data;
2617 struct mgmt_pending_cmd *cmd;
2618 struct bt_uuid *uuid;
2619 int err;
2620
2621 bt_dev_dbg(hdev, "sock %p", sk);
2622
2623 hci_dev_lock(hdev);
2624
2625 if (pending_eir_or_class(hdev)) {
2626 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2627 MGMT_STATUS_BUSY);
2628 goto failed;
2629 }
2630
2631 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2632 if (!uuid) {
2633 err = -ENOMEM;
2634 goto failed;
2635 }
2636
2637 memcpy(uuid->uuid, cp->uuid, 16);
2638 uuid->svc_hint = cp->svc_hint;
2639 uuid->size = get_uuid_size(cp->uuid);
2640
2641 list_add_tail(&uuid->list, &hdev->uuids);
2642
2643 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2644 if (!cmd) {
2645 err = -ENOMEM;
2646 goto failed;
2647 }
2648
2649 /* MGMT_OP_ADD_UUID doesn't require the adapter to be UP/Running,
2650 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2651 */
2652 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2653 mgmt_class_complete);
2654 if (err < 0) {
2655 mgmt_pending_free(cmd);
2656 goto failed;
2657 }
2658
2659 failed:
2660 hci_dev_unlock(hdev);
2661 return err;
2662 }
2663
2664 static bool enable_service_cache(struct hci_dev *hdev)
2665 {
2666 if (!hdev_is_powered(hdev))
2667 return false;
2668
2669 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2670 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2671 CACHE_TIMEOUT);
2672 return true;
2673 }
2674
2675 return false;
2676 }
2677
2678 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2679 {
2680 int err;
2681
2682 err = hci_update_class_sync(hdev);
2683 if (err)
2684 return err;
2685
2686 return hci_update_eir_sync(hdev);
2687 }
2688
2689 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2690 u16 len)
2691 {
2692 struct mgmt_cp_remove_uuid *cp = data;
2693 struct mgmt_pending_cmd *cmd;
2694 struct bt_uuid *match, *tmp;
2695 static const u8 bt_uuid_any[] = {
2696 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2697 };
2698 int err, found;
2699
2700 bt_dev_dbg(hdev, "sock %p", sk);
2701
2702 hci_dev_lock(hdev);
2703
2704 if (pending_eir_or_class(hdev)) {
2705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2706 MGMT_STATUS_BUSY);
2707 goto unlock;
2708 }
2709
2710 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2711 hci_uuids_clear(hdev);
2712
2713 if (enable_service_cache(hdev)) {
2714 err = mgmt_cmd_complete(sk, hdev->id,
2715 MGMT_OP_REMOVE_UUID,
2716 0, hdev->dev_class, 3);
2717 goto unlock;
2718 }
2719
2720 goto update_class;
2721 }
2722
2723 found = 0;
2724
2725 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2726 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2727 continue;
2728
2729 list_del(&match->list);
2730 kfree(match);
2731 found++;
2732 }
2733
2734 if (found == 0) {
2735 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2736 MGMT_STATUS_INVALID_PARAMS);
2737 goto unlock;
2738 }
2739
2740 update_class:
2741 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2742 if (!cmd) {
2743 err = -ENOMEM;
2744 goto unlock;
2745 }
2746
2747 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be UP/Running,
2748 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2749 */
2750 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2751 mgmt_class_complete);
2752 if (err < 0)
2753 mgmt_pending_free(cmd);
2754
2755 unlock:
2756 hci_dev_unlock(hdev);
2757 return err;
2758 }
2759
2760 static int set_class_sync(struct hci_dev *hdev, void *data)
2761 {
2762 int err = 0;
2763
2764 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2765 cancel_delayed_work_sync(&hdev->service_cache);
2766 err = hci_update_eir_sync(hdev);
2767 }
2768
2769 if (err)
2770 return err;
2771
2772 return hci_update_class_sync(hdev);
2773 }
2774
2775 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2776 u16 len)
2777 {
2778 struct mgmt_cp_set_dev_class *cp = data;
2779 struct mgmt_pending_cmd *cmd;
2780 int err;
2781
2782 bt_dev_dbg(hdev, "sock %p", sk);
2783
2784 if (!lmp_bredr_capable(hdev))
2785 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2786 MGMT_STATUS_NOT_SUPPORTED);
2787
2788 hci_dev_lock(hdev);
2789
2790 if (pending_eir_or_class(hdev)) {
2791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2792 MGMT_STATUS_BUSY);
2793 goto unlock;
2794 }
2795
2796 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2797 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2798 MGMT_STATUS_INVALID_PARAMS);
2799 goto unlock;
2800 }
2801
2802 hdev->major_class = cp->major;
2803 hdev->minor_class = cp->minor;
2804
2805 if (!hdev_is_powered(hdev)) {
2806 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2807 hdev->dev_class, 3);
2808 goto unlock;
2809 }
2810
2811 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2812 if (!cmd) {
2813 err = -ENOMEM;
2814 goto unlock;
2815 }
2816
2817 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be UP/Running,
2818 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2819 */
2820 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2821 mgmt_class_complete);
2822 if (err < 0)
2823 mgmt_pending_free(cmd);
2824
2825 unlock:
2826 hci_dev_unlock(hdev);
2827 return err;
2828 }
2829
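/* Handler for MGMT_OP_LOAD_LINK_KEYS. Replaces all stored BR/EDR link
 * keys; entries with a blocked key value, a non-BR/EDR address type, an
 * unknown key type or a debug combination key are skipped.
 */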
2830 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2831 u16 len)
2832 {
2833 struct mgmt_cp_load_link_keys *cp = data;
2834 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2835 sizeof(struct mgmt_link_key_info));
2836 u16 key_count, expected_len;
2837 bool changed;
2838 int i;
2839
2840 bt_dev_dbg(hdev, "sock %p", sk);
2841
2842 if (!lmp_bredr_capable(hdev))
2843 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2844 MGMT_STATUS_NOT_SUPPORTED);
2845
2846 key_count = __le16_to_cpu(cp->key_count);
2847 if (key_count > max_key_count) {
2848 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2849 key_count);
2850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2851 MGMT_STATUS_INVALID_PARAMS);
2852 }
2853
2854 expected_len = struct_size(cp, keys, key_count);
2855 if (expected_len != len) {
2856 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2857 expected_len, len);
2858 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2859 MGMT_STATUS_INVALID_PARAMS);
2860 }
2861
2862 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2864 MGMT_STATUS_INVALID_PARAMS);
2865
2866 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2867 key_count);
2868
2869 hci_dev_lock(hdev);
2870
2871 hci_link_keys_clear(hdev);
2872
2873 if (cp->debug_keys)
2874 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2875 else
2876 changed = hci_dev_test_and_clear_flag(hdev,
2877 HCI_KEEP_DEBUG_KEYS);
2878
2879 if (changed)
2880 new_settings(hdev, NULL);
2881
2882 for (i = 0; i < key_count; i++) {
2883 struct mgmt_link_key_info *key = &cp->keys[i];
2884
2885 if (hci_is_blocked_key(hdev,
2886 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2887 key->val)) {
2888 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2889 &key->addr.bdaddr);
2890 continue;
2891 }
2892
2893 if (key->addr.type != BDADDR_BREDR) {
2894 bt_dev_warn(hdev,
2895 "Invalid link address type %u for %pMR",
2896 key->addr.type, &key->addr.bdaddr);
2897 continue;
2898 }
2899
2900 if (key->type > 0x08) {
2901 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2902 key->type, &key->addr.bdaddr);
2903 continue;
2904 }
2905
2906 /* Always ignore debug keys and require a new pairing if
2907 * the user wants to use them.
2908 */
2909 if (key->type == HCI_LK_DEBUG_COMBINATION)
2910 continue;
2911
2912 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2913 key->type, key->pin_len, NULL);
2914 }
2915
2916 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2917
2918 hci_dev_unlock(hdev);
2919
2920 return 0;
2921 }
2922
2923 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2924 u8 addr_type, struct sock *skip_sk)
2925 {
2926 struct mgmt_ev_device_unpaired ev;
2927
2928 bacpy(&ev.addr.bdaddr, bdaddr);
2929 ev.addr.type = addr_type;
2930
2931 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2932 skip_sk);
2933 }
2934
2935 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2936 {
2937 struct mgmt_pending_cmd *cmd = data;
2938 struct mgmt_cp_unpair_device *cp = cmd->param;
2939
2940 if (!err)
2941 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2942
2943 cmd->cmd_complete(cmd, err);
2944 mgmt_pending_free(cmd);
2945 }
2946
2947 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2948 {
2949 struct mgmt_pending_cmd *cmd = data;
2950 struct mgmt_cp_unpair_device *cp = cmd->param;
2951 struct hci_conn *conn;
2952
2953 if (cp->addr.type == BDADDR_BREDR)
2954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2955 &cp->addr.bdaddr);
2956 else
2957 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2958 le_addr_type(cp->addr.type));
2959
2960 if (!conn)
2961 return 0;
2962
2963 /* Disregard any possible error since the likes of hci_abort_conn_sync
2964 * will clean up the connection no matter the error.
2965 */
2966 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2967
2968 return 0;
2969 }
2970
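/* Handler for MGMT_OP_UNPAIR_DEVICE. Removes the stored keys for the
 * given address (the link key for BR/EDR, LTK and IRK via SMP for LE)
 * and, if requested, terminates an existing connection through
 * unpair_device_sync().
 */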
2971 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2972 u16 len)
2973 {
2974 struct mgmt_cp_unpair_device *cp = data;
2975 struct mgmt_rp_unpair_device rp;
2976 struct hci_conn_params *params;
2977 struct mgmt_pending_cmd *cmd;
2978 struct hci_conn *conn;
2979 u8 addr_type;
2980 int err;
2981
2982 memset(&rp, 0, sizeof(rp));
2983 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2984 rp.addr.type = cp->addr.type;
2985
2986 if (!bdaddr_type_is_valid(cp->addr.type))
2987 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2988 MGMT_STATUS_INVALID_PARAMS,
2989 &rp, sizeof(rp));
2990
2991 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2992 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2993 MGMT_STATUS_INVALID_PARAMS,
2994 &rp, sizeof(rp));
2995
2996 hci_dev_lock(hdev);
2997
2998 if (!hdev_is_powered(hdev)) {
2999 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3000 MGMT_STATUS_NOT_POWERED, &rp,
3001 sizeof(rp));
3002 goto unlock;
3003 }
3004
3005 if (cp->addr.type == BDADDR_BREDR) {
3006 /* If disconnection is requested, then look up the
3007 * connection. If the remote device is connected, it
3008 * will be later used to terminate the link.
3009 *
3010 * Setting it to NULL explicitly will cause no
3011 * termination of the link.
3012 */
3013 if (cp->disconnect)
3014 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3015 &cp->addr.bdaddr);
3016 else
3017 conn = NULL;
3018
3019 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3020 if (err < 0) {
3021 err = mgmt_cmd_complete(sk, hdev->id,
3022 MGMT_OP_UNPAIR_DEVICE,
3023 MGMT_STATUS_NOT_PAIRED, &rp,
3024 sizeof(rp));
3025 goto unlock;
3026 }
3027
3028 goto done;
3029 }
3030
3031 /* LE address type */
3032 addr_type = le_addr_type(cp->addr.type);
3033
3034 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3035 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3036 if (err < 0) {
3037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3038 MGMT_STATUS_NOT_PAIRED, &rp,
3039 sizeof(rp));
3040 goto unlock;
3041 }
3042
3043 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3044 if (!conn) {
3045 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3046 goto done;
3047 }
3048
3050 /* Defer clearing up the connection parameters until closing to
3051 * give a chance of keeping them if a re-pairing happens.
3052 */
3053 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3054
3055 /* Disable auto-connection parameters if present */
3056 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3057 if (params) {
3058 if (params->explicit_connect)
3059 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3060 else
3061 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3062 }
3063
3064 /* If disconnection is not requested, then clear the connection
3065 * variable so that the link is not terminated.
3066 */
3067 if (!cp->disconnect)
3068 conn = NULL;
3069
3070 done:
3071 /* If the connection variable is set, then termination of the
3072 * link is requested.
3073 */
3074 if (!conn) {
3075 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3076 &rp, sizeof(rp));
3077 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3078 goto unlock;
3079 }
3080
3081 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3082 sizeof(*cp));
3083 if (!cmd) {
3084 err = -ENOMEM;
3085 goto unlock;
3086 }
3087
3088 cmd->cmd_complete = addr_cmd_complete;
3089
3090 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3091 unpair_device_complete);
3092 if (err < 0)
3093 mgmt_pending_free(cmd);
3094
3095 unlock:
3096 hci_dev_unlock(hdev);
3097 return err;
3098 }
3099
3100 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3101 {
3102 struct mgmt_pending_cmd *cmd = data;
3103
3104 cmd->cmd_complete(cmd, mgmt_status(err));
3105 mgmt_pending_free(cmd);
3106 }
3107
3108 static int disconnect_sync(struct hci_dev *hdev, void *data)
3109 {
3110 struct mgmt_pending_cmd *cmd = data;
3111 struct mgmt_cp_disconnect *cp = cmd->param;
3112 struct hci_conn *conn;
3113
3114 if (cp->addr.type == BDADDR_BREDR)
3115 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3116 &cp->addr.bdaddr);
3117 else
3118 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3119 le_addr_type(cp->addr.type));
3120
3121 if (!conn)
3122 return -ENOTCONN;
3123
3124 /* Disregard any possible error since the likes of hci_abort_conn_sync
3125 * will clean up the connection no matter the error.
3126 */
3127 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3128
3129 return 0;
3130 }
3131
3132 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3133 u16 len)
3134 {
3135 struct mgmt_cp_disconnect *cp = data;
3136 struct mgmt_rp_disconnect rp;
3137 struct mgmt_pending_cmd *cmd;
3138 int err;
3139
3140 bt_dev_dbg(hdev, "sock %p", sk);
3141
3142 memset(&rp, 0, sizeof(rp));
3143 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3144 rp.addr.type = cp->addr.type;
3145
3146 if (!bdaddr_type_is_valid(cp->addr.type))
3147 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3148 MGMT_STATUS_INVALID_PARAMS,
3149 &rp, sizeof(rp));
3150
3151 hci_dev_lock(hdev);
3152
3153 if (!test_bit(HCI_UP, &hdev->flags)) {
3154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3155 MGMT_STATUS_NOT_POWERED, &rp,
3156 sizeof(rp));
3157 goto failed;
3158 }
3159
3160 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3161 if (!cmd) {
3162 err = -ENOMEM;
3163 goto failed;
3164 }
3165
3166 cmd->cmd_complete = generic_cmd_complete;
3167
3168 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3169 disconnect_complete);
3170 if (err < 0)
3171 mgmt_pending_free(cmd);
3172
3173 failed:
3174 hci_dev_unlock(hdev);
3175 return err;
3176 }
3177
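/* Map an HCI link type and address type to the single address type
 * value used on the management interface.
 */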
3178 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3179 {
3180 switch (link_type) {
3181 case ISO_LINK:
3182 case LE_LINK:
3183 switch (addr_type) {
3184 case ADDR_LE_DEV_PUBLIC:
3185 return BDADDR_LE_PUBLIC;
3186
3187 default:
3188 /* Fallback to LE Random address type */
3189 return BDADDR_LE_RANDOM;
3190 }
3191
3192 default:
3193 /* Fallback to BR/EDR type */
3194 return BDADDR_BREDR;
3195 }
3196 }
3197
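/* Handler for MGMT_OP_GET_CONNECTIONS. Returns the addresses of all
 * connections known to mgmt, filtering out SCO and eSCO links.
 */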
3198 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3199 u16 data_len)
3200 {
3201 struct mgmt_rp_get_connections *rp;
3202 struct hci_conn *c;
3203 int err;
3204 u16 i;
3205
3206 bt_dev_dbg(hdev, "sock %p", sk);
3207
3208 hci_dev_lock(hdev);
3209
3210 if (!hdev_is_powered(hdev)) {
3211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3212 MGMT_STATUS_NOT_POWERED);
3213 goto unlock;
3214 }
3215
3216 i = 0;
3217 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3218 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3219 i++;
3220 }
3221
3222 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3223 if (!rp) {
3224 err = -ENOMEM;
3225 goto unlock;
3226 }
3227
3228 i = 0;
3229 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3230 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3231 continue;
3232 bacpy(&rp->addr[i].bdaddr, &c->dst);
3233 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3234 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235 continue;
3236 i++;
3237 }
3238
3239 rp->conn_count = cpu_to_le16(i);
3240
3241 /* Recalculate length in case of filtered SCO connections, etc */
3242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3243 struct_size(rp, addr, i));
3244
3245 kfree(rp);
3246
3247 unlock:
3248 hci_dev_unlock(hdev);
3249 return err;
3250 }
3251
3252 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3253 struct mgmt_cp_pin_code_neg_reply *cp)
3254 {
3255 struct mgmt_pending_cmd *cmd;
3256 int err;
3257
3258 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259 sizeof(*cp));
3260 if (!cmd)
3261 return -ENOMEM;
3262
3263 cmd->cmd_complete = addr_cmd_complete;
3264
3265 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3266 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3267 if (err < 0)
3268 mgmt_pending_remove(cmd);
3269
3270 return err;
3271 }
3272
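/* Handler for MGMT_OP_PIN_CODE_REPLY. If the pending security level is
 * BT_SECURITY_HIGH a 16 byte PIN is required; anything shorter is
 * converted into a negative reply.
 */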
3273 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3274 u16 len)
3275 {
3276 struct hci_conn *conn;
3277 struct mgmt_cp_pin_code_reply *cp = data;
3278 struct hci_cp_pin_code_reply reply;
3279 struct mgmt_pending_cmd *cmd;
3280 int err;
3281
3282 bt_dev_dbg(hdev, "sock %p", sk);
3283
3284 hci_dev_lock(hdev);
3285
3286 if (!hdev_is_powered(hdev)) {
3287 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3288 MGMT_STATUS_NOT_POWERED);
3289 goto failed;
3290 }
3291
3292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3293 if (!conn) {
3294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3295 MGMT_STATUS_NOT_CONNECTED);
3296 goto failed;
3297 }
3298
3299 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3300 struct mgmt_cp_pin_code_neg_reply ncp;
3301
3302 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3303
3304 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3305
3306 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3307 if (err >= 0)
3308 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3309 MGMT_STATUS_INVALID_PARAMS);
3310
3311 goto failed;
3312 }
3313
3314 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3315 if (!cmd) {
3316 err = -ENOMEM;
3317 goto failed;
3318 }
3319
3320 cmd->cmd_complete = addr_cmd_complete;
3321
3322 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3323 reply.pin_len = cp->pin_len;
3324 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3325
3326 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3327 if (err < 0)
3328 mgmt_pending_remove(cmd);
3329
3330 failed:
3331 hci_dev_unlock(hdev);
3332 return err;
3333 }
3334
3335 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3336 u16 len)
3337 {
3338 struct mgmt_cp_set_io_capability *cp = data;
3339
3340 bt_dev_dbg(hdev, "sock %p", sk);
3341
3342 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3344 MGMT_STATUS_INVALID_PARAMS);
3345
3346 hci_dev_lock(hdev);
3347
3348 hdev->io_capability = cp->io_capability;
3349
3350 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3351
3352 hci_dev_unlock(hdev);
3353
3354 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3355 NULL, 0);
3356 }
3357
3358 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3359 {
3360 struct hci_dev *hdev = conn->hdev;
3361 struct mgmt_pending_cmd *cmd;
3362
3363 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3364 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3365 continue;
3366
3367 if (cmd->user_data != conn)
3368 continue;
3369
3370 return cmd;
3371 }
3372
3373 return NULL;
3374 }
3375
3376 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3377 {
3378 struct mgmt_rp_pair_device rp;
3379 struct hci_conn *conn = cmd->user_data;
3380 int err;
3381
3382 bacpy(&rp.addr.bdaddr, &conn->dst);
3383 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3384
3385 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3386 status, &rp, sizeof(rp));
3387
3388 /* So we don't get further callbacks for this connection */
3389 conn->connect_cfm_cb = NULL;
3390 conn->security_cfm_cb = NULL;
3391 conn->disconn_cfm_cb = NULL;
3392
3393 hci_conn_drop(conn);
3394
3395 /* The device is paired so there is no need to remove
3396 * its connection parameters anymore.
3397 */
3398 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3399
3400 hci_conn_put(conn);
3401
3402 return err;
3403 }
3404
3405 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3406 {
3407 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3408 struct mgmt_pending_cmd *cmd;
3409
3410 cmd = find_pairing(conn);
3411 if (cmd) {
3412 cmd->cmd_complete(cmd, status);
3413 mgmt_pending_remove(cmd);
3414 }
3415 }
3416
3417 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3418 {
3419 struct mgmt_pending_cmd *cmd;
3420
3421 BT_DBG("status %u", status);
3422
3423 cmd = find_pairing(conn);
3424 if (!cmd) {
3425 BT_DBG("Unable to find a pending command");
3426 return;
3427 }
3428
3429 cmd->cmd_complete(cmd, mgmt_status(status));
3430 mgmt_pending_remove(cmd);
3431 }
3432
3433 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3434 {
3435 struct mgmt_pending_cmd *cmd;
3436
3437 BT_DBG("status %u", status);
3438
3439 if (!status)
3440 return;
3441
3442 cmd = find_pairing(conn);
3443 if (!cmd) {
3444 BT_DBG("Unable to find a pending command");
3445 return;
3446 }
3447
3448 cmd->cmd_complete(cmd, mgmt_status(status));
3449 mgmt_pending_remove(cmd);
3450 }
3451
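/* Handler for MGMT_OP_PAIR_DEVICE. Creates an ACL or LE connection as
 * needed and attaches pairing callbacks to it. For BR/EDR the
 * connect/security callbacks complete the command, while for LE only
 * failures are reported through them since a bare connection is no
 * proof that pairing finished.
 */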
3452 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3453 u16 len)
3454 {
3455 struct mgmt_cp_pair_device *cp = data;
3456 struct mgmt_rp_pair_device rp;
3457 struct mgmt_pending_cmd *cmd;
3458 u8 sec_level, auth_type;
3459 struct hci_conn *conn;
3460 int err;
3461
3462 bt_dev_dbg(hdev, "sock %p", sk);
3463
3464 memset(&rp, 0, sizeof(rp));
3465 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3466 rp.addr.type = cp->addr.type;
3467
3468 if (!bdaddr_type_is_valid(cp->addr.type))
3469 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3470 MGMT_STATUS_INVALID_PARAMS,
3471 &rp, sizeof(rp));
3472
3473 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3474 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3475 MGMT_STATUS_INVALID_PARAMS,
3476 &rp, sizeof(rp));
3477
3478 hci_dev_lock(hdev);
3479
3480 if (!hdev_is_powered(hdev)) {
3481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3482 MGMT_STATUS_NOT_POWERED, &rp,
3483 sizeof(rp));
3484 goto unlock;
3485 }
3486
3487 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3489 MGMT_STATUS_ALREADY_PAIRED, &rp,
3490 sizeof(rp));
3491 goto unlock;
3492 }
3493
3494 sec_level = BT_SECURITY_MEDIUM;
3495 auth_type = HCI_AT_DEDICATED_BONDING;
3496
3497 if (cp->addr.type == BDADDR_BREDR) {
3498 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3499 auth_type, CONN_REASON_PAIR_DEVICE,
3500 HCI_ACL_CONN_TIMEOUT);
3501 } else {
3502 u8 addr_type = le_addr_type(cp->addr.type);
3503 struct hci_conn_params *p;
3504
3505 /* When pairing a new device, it is expected to remember
3506 * this device for future connections. Adding the connection
3507 * parameter information ahead of time allows tracking
3508 * of the peripheral preferred values and will speed up any
3509 * further connection establishment.
3510 *
3511 * If connection parameters already exist, then they
3512 * will be kept and this function does nothing.
3513 */
3514 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3515 if (!p) {
3516 err = -EIO;
3517 goto unlock;
3518 }
3519
3520 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3521 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3522
3523 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3524 sec_level, HCI_LE_CONN_TIMEOUT,
3525 CONN_REASON_PAIR_DEVICE);
3526 }
3527
3528 if (IS_ERR(conn)) {
3529 int status;
3530
3531 if (PTR_ERR(conn) == -EBUSY)
3532 status = MGMT_STATUS_BUSY;
3533 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3534 status = MGMT_STATUS_NOT_SUPPORTED;
3535 else if (PTR_ERR(conn) == -ECONNREFUSED)
3536 status = MGMT_STATUS_REJECTED;
3537 else
3538 status = MGMT_STATUS_CONNECT_FAILED;
3539
3540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3541 status, &rp, sizeof(rp));
3542 goto unlock;
3543 }
3544
3545 if (conn->connect_cfm_cb) {
3546 hci_conn_drop(conn);
3547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3548 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3549 goto unlock;
3550 }
3551
3552 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3553 if (!cmd) {
3554 err = -ENOMEM;
3555 hci_conn_drop(conn);
3556 goto unlock;
3557 }
3558
3559 cmd->cmd_complete = pairing_complete;
3560
3561 /* For LE, just connecting isn't a proof that the pairing finished */
3562 if (cp->addr.type == BDADDR_BREDR) {
3563 conn->connect_cfm_cb = pairing_complete_cb;
3564 conn->security_cfm_cb = pairing_complete_cb;
3565 conn->disconn_cfm_cb = pairing_complete_cb;
3566 } else {
3567 conn->connect_cfm_cb = le_pairing_complete_cb;
3568 conn->security_cfm_cb = le_pairing_complete_cb;
3569 conn->disconn_cfm_cb = le_pairing_complete_cb;
3570 }
3571
3572 conn->io_capability = cp->io_cap;
3573 cmd->user_data = hci_conn_get(conn);
3574
3575 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3576 hci_conn_security(conn, sec_level, auth_type, true)) {
3577 cmd->cmd_complete(cmd, 0);
3578 mgmt_pending_remove(cmd);
3579 }
3580
3581 err = 0;
3582
3583 unlock:
3584 hci_dev_unlock(hdev);
3585 return err;
3586 }
3587
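/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE. Completes the pending Pair
 * Device command as cancelled, removes any keys created so far and
 * aborts the link if it only existed for the pairing attempt.
 */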
3588 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3589 u16 len)
3590 {
3591 struct mgmt_addr_info *addr = data;
3592 struct mgmt_pending_cmd *cmd;
3593 struct hci_conn *conn;
3594 int err;
3595
3596 bt_dev_dbg(hdev, "sock %p", sk);
3597
3598 hci_dev_lock(hdev);
3599
3600 if (!hdev_is_powered(hdev)) {
3601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3602 MGMT_STATUS_NOT_POWERED);
3603 goto unlock;
3604 }
3605
3606 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3607 if (!cmd) {
3608 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3609 MGMT_STATUS_INVALID_PARAMS);
3610 goto unlock;
3611 }
3612
3613 conn = cmd->user_data;
3614
3615 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3617 MGMT_STATUS_INVALID_PARAMS);
3618 goto unlock;
3619 }
3620
3621 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3622 mgmt_pending_remove(cmd);
3623
3624 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3625 addr, sizeof(*addr));
3626
3627 /* Since the user doesn't want to proceed with the connection,
3628 * abort any ongoing pairing and then terminate the link if it
3629 * was created because of the pair device action.
3630 */
3631 if (addr->type == BDADDR_BREDR)
3632 hci_remove_link_key(hdev, &addr->bdaddr);
3633 else
3634 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3635 le_addr_type(addr->type));
3636
3637 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3638 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3639
3640 unlock:
3641 hci_dev_unlock(hdev);
3642 return err;
3643 }
3644
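/* Common helper for the user confirmation and passkey (negative)
 * replies: LE responses are routed through SMP, while BR/EDR responses
 * are sent as the corresponding HCI command.
 */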
3645 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3646 struct mgmt_addr_info *addr, u16 mgmt_op,
3647 u16 hci_op, __le32 passkey)
3648 {
3649 struct mgmt_pending_cmd *cmd;
3650 struct hci_conn *conn;
3651 int err;
3652
3653 hci_dev_lock(hdev);
3654
3655 if (!hdev_is_powered(hdev)) {
3656 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3657 MGMT_STATUS_NOT_POWERED, addr,
3658 sizeof(*addr));
3659 goto done;
3660 }
3661
3662 if (addr->type == BDADDR_BREDR)
3663 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3664 else
3665 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3666 le_addr_type(addr->type));
3667
3668 if (!conn) {
3669 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3670 MGMT_STATUS_NOT_CONNECTED, addr,
3671 sizeof(*addr));
3672 goto done;
3673 }
3674
3675 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3676 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3677 if (!err)
3678 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3679 MGMT_STATUS_SUCCESS, addr,
3680 sizeof(*addr));
3681 else
3682 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3683 MGMT_STATUS_FAILED, addr,
3684 sizeof(*addr));
3685
3686 goto done;
3687 }
3688
3689 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3690 if (!cmd) {
3691 err = -ENOMEM;
3692 goto done;
3693 }
3694
3695 cmd->cmd_complete = addr_cmd_complete;
3696
3697 /* Continue with pairing via HCI */
3698 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3699 struct hci_cp_user_passkey_reply cp;
3700
3701 bacpy(&cp.bdaddr, &addr->bdaddr);
3702 cp.passkey = passkey;
3703 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3704 } else
3705 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3706 &addr->bdaddr);
3707
3708 if (err < 0)
3709 mgmt_pending_remove(cmd);
3710
3711 done:
3712 hci_dev_unlock(hdev);
3713 return err;
3714 }
3715
3716 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3717 void *data, u16 len)
3718 {
3719 struct mgmt_cp_pin_code_neg_reply *cp = data;
3720
3721 bt_dev_dbg(hdev, "sock %p", sk);
3722
3723 return user_pairing_resp(sk, hdev, &cp->addr,
3724 MGMT_OP_PIN_CODE_NEG_REPLY,
3725 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3726 }
3727
3728 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3729 u16 len)
3730 {
3731 struct mgmt_cp_user_confirm_reply *cp = data;
3732
3733 bt_dev_dbg(hdev, "sock %p", sk);
3734
3735 if (len != sizeof(*cp))
3736 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3737 MGMT_STATUS_INVALID_PARAMS);
3738
3739 return user_pairing_resp(sk, hdev, &cp->addr,
3740 MGMT_OP_USER_CONFIRM_REPLY,
3741 HCI_OP_USER_CONFIRM_REPLY, 0);
3742 }
3743
3744 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3745 void *data, u16 len)
3746 {
3747 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3748
3749 bt_dev_dbg(hdev, "sock %p", sk);
3750
3751 return user_pairing_resp(sk, hdev, &cp->addr,
3752 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3753 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3754 }
3755
3756 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3757 u16 len)
3758 {
3759 struct mgmt_cp_user_passkey_reply *cp = data;
3760
3761 bt_dev_dbg(hdev, "sock %p", sk);
3762
3763 return user_pairing_resp(sk, hdev, &cp->addr,
3764 MGMT_OP_USER_PASSKEY_REPLY,
3765 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3766 }
3767
3768 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3769 void *data, u16 len)
3770 {
3771 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3772
3773 bt_dev_dbg(hdev, "sock %p", sk);
3774
3775 return user_pairing_resp(sk, hdev, &cp->addr,
3776 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3777 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3778 }
3779
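/* Expire the current advertising instance when it carries data affected
 * by the given flags (e.g. local name or appearance): cancel its timeout
 * and schedule the next instance in the rotation so the stale data is
 * replaced.
 */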
3780 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3781 {
3782 struct adv_info *adv_instance;
3783
3784 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3785 if (!adv_instance)
3786 return 0;
3787
3788 /* stop if current instance doesn't need to be changed */
3789 if (!(adv_instance->flags & flags))
3790 return 0;
3791
3792 cancel_adv_timeout(hdev);
3793
3794 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3795 if (!adv_instance)
3796 return 0;
3797
3798 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3799
3800 return 0;
3801 }
3802
3803 static int name_changed_sync(struct hci_dev *hdev, void *data)
3804 {
3805 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3806 }
3807
3808 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3809 {
3810 struct mgmt_pending_cmd *cmd = data;
3811 struct mgmt_cp_set_local_name *cp = cmd->param;
3812 u8 status = mgmt_status(err);
3813
3814 bt_dev_dbg(hdev, "err %d", err);
3815
3816 if (err == -ECANCELED ||
3817 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3818 return;
3819
3820 if (status) {
3821 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3822 status);
3823 } else {
3824 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3825 cp, sizeof(*cp));
3826
3827 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3828 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3829 }
3830
3831 mgmt_pending_remove(cmd);
3832 }
3833
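/* Runs on the cmd_sync work: push the new local name to the controller,
 * updating the BR/EDR name and EIR data when BR/EDR is supported.
 */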
3834 static int set_name_sync(struct hci_dev *hdev, void *data)
3835 {
3836 struct mgmt_pending_cmd *cmd = data;
3837 struct mgmt_cp_set_local_name *cp = cmd->param;
3838
3839 if (lmp_bredr_capable(hdev)) {
3840 hci_update_name_sync(hdev, cp->name);
3841 hci_update_eir_sync(hdev);
3842 }
3843
3844 /* The name is stored in the scan response data, so there is
3845 * no need to update the advertising data here.
3846 */
3847 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3848 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3849
3850 return 0;
3851 }
3852
3853 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3854 u16 len)
3855 {
3856 struct mgmt_cp_set_local_name *cp = data;
3857 struct mgmt_pending_cmd *cmd;
3858 int err;
3859
3860 bt_dev_dbg(hdev, "sock %p", sk);
3861
3862 hci_dev_lock(hdev);
3863
3864 /* If the old values are the same as the new ones just return a
3865 * direct command complete event.
3866 */
3867 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3868 !memcmp(hdev->short_name, cp->short_name,
3869 sizeof(hdev->short_name))) {
3870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3871 data, len);
3872 goto failed;
3873 }
3874
3875 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3876
3877 if (!hdev_is_powered(hdev)) {
3878 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3879
3880 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3881 data, len);
3882 if (err < 0)
3883 goto failed;
3884
3885 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3886 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3887 ext_info_changed(hdev, sk);
3888
3889 goto failed;
3890 }
3891
3892 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3893 if (!cmd)
3894 err = -ENOMEM;
3895 else
3896 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3897 set_name_complete);
3898
3899 if (err < 0) {
3900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3901 MGMT_STATUS_FAILED);
3902
3903 if (cmd)
3904 mgmt_pending_remove(cmd);
3905
3906 goto failed;
3907 }
3908
3909 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3910
3911 failed:
3912 hci_dev_unlock(hdev);
3913 return err;
3914 }
3915
3916 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3917 {
3918 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3919 }
3920
3921 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3922 u16 len)
3923 {
3924 struct mgmt_cp_set_appearance *cp = data;
3925 u16 appearance;
3926 int err;
3927
3928 bt_dev_dbg(hdev, "sock %p", sk);
3929
3930 if (!lmp_le_capable(hdev))
3931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3932 MGMT_STATUS_NOT_SUPPORTED);
3933
3934 appearance = le16_to_cpu(cp->appearance);
3935
3936 hci_dev_lock(hdev);
3937
3938 if (hdev->appearance != appearance) {
3939 hdev->appearance = appearance;
3940
3941 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3942 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3943 NULL);
3944
3945 ext_info_changed(hdev, sk);
3946 }
3947
3948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3949 0);
3950
3951 hci_dev_unlock(hdev);
3952
3953 return err;
3954 }
3955
3956 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3957 void *data, u16 len)
3958 {
3959 struct mgmt_rp_get_phy_configuration rp;
3960
3961 bt_dev_dbg(hdev, "sock %p", sk);
3962
3963 hci_dev_lock(hdev);
3964
3965 memset(&rp, 0, sizeof(rp));
3966
3967 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3968 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3969 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3970
3971 hci_dev_unlock(hdev);
3972
3973 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3974 &rp, sizeof(rp));
3975 }
3976
3977 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3978 {
3979 struct mgmt_ev_phy_configuration_changed ev;
3980
3981 memset(&ev, 0, sizeof(ev));
3982
3983 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3984
3985 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3986 sizeof(ev), skip);
3987 }
3988
3989 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3990 {
3991 struct mgmt_pending_cmd *cmd = data;
3992 struct sk_buff *skb = cmd->skb;
3993 u8 status = mgmt_status(err);
3994
3995 if (err == -ECANCELED ||
3996 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3997 return;
3998
3999 if (!status) {
4000 if (!skb)
4001 status = MGMT_STATUS_FAILED;
4002 else if (IS_ERR(skb))
4003 status = mgmt_status(PTR_ERR(skb));
4004 else
4005 status = mgmt_status(skb->data[0]);
4006 }
4007
4008 bt_dev_dbg(hdev, "status %d", status);
4009
4010 if (status) {
4011 mgmt_cmd_status(cmd->sk, hdev->id,
4012 MGMT_OP_SET_PHY_CONFIGURATION, status);
4013 } else {
4014 mgmt_cmd_complete(cmd->sk, hdev->id,
4015 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4016 NULL, 0);
4017
4018 mgmt_phy_configuration_changed(hdev, cmd->sk);
4019 }
4020
4021 if (skb && !IS_ERR(skb))
4022 kfree_skb(skb);
4023
4024 mgmt_pending_remove(cmd);
4025 }
4026
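/* Translate the MGMT LE PHY selection into an HCI LE Set Default PHY
 * command: the all_phys bits signal "no preference" when no TX or RX
 * PHYs were selected, otherwise explicit 1M/2M/Coded masks are built.
 */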
4027 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4028 {
4029 struct mgmt_pending_cmd *cmd = data;
4030 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4031 struct hci_cp_le_set_default_phy cp_phy;
4032 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4033
4034 memset(&cp_phy, 0, sizeof(cp_phy));
4035
4036 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4037 cp_phy.all_phys |= 0x01;
4038
4039 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4040 cp_phy.all_phys |= 0x02;
4041
4042 if (selected_phys & MGMT_PHY_LE_1M_TX)
4043 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4044
4045 if (selected_phys & MGMT_PHY_LE_2M_TX)
4046 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4047
4048 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4049 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4050
4051 if (selected_phys & MGMT_PHY_LE_1M_RX)
4052 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4053
4054 if (selected_phys & MGMT_PHY_LE_2M_RX)
4055 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4056
4057 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4058 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4059
4060 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4061 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4062
4063 return 0;
4064 }
4065
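/* Apply a new PHY configuration: the BR/EDR part is handled locally by
 * adjusting the ACL packet type mask, while the LE part is queued as an
 * HCI LE Set Default PHY command via set_default_phy_sync().
 */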
4066 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4067 void *data, u16 len)
4068 {
4069 struct mgmt_cp_set_phy_configuration *cp = data;
4070 struct mgmt_pending_cmd *cmd;
4071 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4072 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4073 bool changed = false;
4074 int err;
4075
4076 bt_dev_dbg(hdev, "sock %p", sk);
4077
4078 configurable_phys = get_configurable_phys(hdev);
4079 supported_phys = get_supported_phys(hdev);
4080 selected_phys = __le32_to_cpu(cp->selected_phys);
4081
4082 if (selected_phys & ~supported_phys)
4083 return mgmt_cmd_status(sk, hdev->id,
4084 MGMT_OP_SET_PHY_CONFIGURATION,
4085 MGMT_STATUS_INVALID_PARAMS);
4086
4087 unconfigure_phys = supported_phys & ~configurable_phys;
4088
4089 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4090 return mgmt_cmd_status(sk, hdev->id,
4091 MGMT_OP_SET_PHY_CONFIGURATION,
4092 MGMT_STATUS_INVALID_PARAMS);
4093
4094 if (selected_phys == get_selected_phys(hdev))
4095 return mgmt_cmd_complete(sk, hdev->id,
4096 MGMT_OP_SET_PHY_CONFIGURATION,
4097 0, NULL, 0);
4098
4099 hci_dev_lock(hdev);
4100
4101 if (!hdev_is_powered(hdev)) {
4102 err = mgmt_cmd_status(sk, hdev->id,
4103 MGMT_OP_SET_PHY_CONFIGURATION,
4104 MGMT_STATUS_REJECTED);
4105 goto unlock;
4106 }
4107
4108 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4109 err = mgmt_cmd_status(sk, hdev->id,
4110 MGMT_OP_SET_PHY_CONFIGURATION,
4111 MGMT_STATUS_BUSY);
4112 goto unlock;
4113 }
4114
4115 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4116 pkt_type |= (HCI_DH3 | HCI_DM3);
4117 else
4118 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4119
4120 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4121 pkt_type |= (HCI_DH5 | HCI_DM5);
4122 else
4123 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4124
4125 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4126 pkt_type &= ~HCI_2DH1;
4127 else
4128 pkt_type |= HCI_2DH1;
4129
4130 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4131 pkt_type &= ~HCI_2DH3;
4132 else
4133 pkt_type |= HCI_2DH3;
4134
4135 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4136 pkt_type &= ~HCI_2DH5;
4137 else
4138 pkt_type |= HCI_2DH5;
4139
4140 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4141 pkt_type &= ~HCI_3DH1;
4142 else
4143 pkt_type |= HCI_3DH1;
4144
4145 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4146 pkt_type &= ~HCI_3DH3;
4147 else
4148 pkt_type |= HCI_3DH3;
4149
4150 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4151 pkt_type &= ~HCI_3DH5;
4152 else
4153 pkt_type |= HCI_3DH5;
4154
4155 if (pkt_type != hdev->pkt_type) {
4156 hdev->pkt_type = pkt_type;
4157 changed = true;
4158 }
4159
4160 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4161 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4162 if (changed)
4163 mgmt_phy_configuration_changed(hdev, sk);
4164
4165 err = mgmt_cmd_complete(sk, hdev->id,
4166 MGMT_OP_SET_PHY_CONFIGURATION,
4167 0, NULL, 0);
4168
4169 goto unlock;
4170 }
4171
4172 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4173 len);
4174 if (!cmd)
4175 err = -ENOMEM;
4176 else
4177 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4178 set_default_phy_complete);
4179
4180 if (err < 0) {
4181 err = mgmt_cmd_status(sk, hdev->id,
4182 MGMT_OP_SET_PHY_CONFIGURATION,
4183 MGMT_STATUS_FAILED);
4184
4185 if (cmd)
4186 mgmt_pending_remove(cmd);
4187 }
4188
4189 unlock:
4190 hci_dev_unlock(hdev);
4191
4192 return err;
4193 }
4194
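/* Replace the list of blocked keys. The old list is cleared first, so a
 * zero key_count simply removes all entries.
 */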
4195 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4196 u16 len)
4197 {
4198 int err = MGMT_STATUS_SUCCESS;
4199 struct mgmt_cp_set_blocked_keys *keys = data;
4200 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4201 sizeof(struct mgmt_blocked_key_info));
4202 u16 key_count, expected_len;
4203 int i;
4204
4205 bt_dev_dbg(hdev, "sock %p", sk);
4206
4207 key_count = __le16_to_cpu(keys->key_count);
4208 if (key_count > max_key_count) {
4209 bt_dev_err(hdev, "too big key_count value %u", key_count);
4210 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4211 MGMT_STATUS_INVALID_PARAMS);
4212 }
4213
4214 expected_len = struct_size(keys, keys, key_count);
4215 if (expected_len != len) {
4216 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4217 expected_len, len);
4218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4219 MGMT_STATUS_INVALID_PARAMS);
4220 }
4221
4222 hci_dev_lock(hdev);
4223
4224 hci_blocked_keys_clear(hdev);
4225
4226 for (i = 0; i < key_count; ++i) {
4227 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4228
4229 if (!b) {
4230 err = MGMT_STATUS_NO_RESOURCES;
4231 break;
4232 }
4233
4234 b->type = keys->keys[i].type;
4235 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4236 list_add_rcu(&b->list, &hdev->blocked_keys);
4237 }
4238 hci_dev_unlock(hdev);
4239
4240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4241 err, NULL, 0);
4242 }
4243
4244 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4245 void *data, u16 len)
4246 {
4247 struct mgmt_mode *cp = data;
4248 int err;
4249 bool changed = false;
4250
4251 bt_dev_dbg(hdev, "sock %p", sk);
4252
4253 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4254 return mgmt_cmd_status(sk, hdev->id,
4255 MGMT_OP_SET_WIDEBAND_SPEECH,
4256 MGMT_STATUS_NOT_SUPPORTED);
4257
4258 if (cp->val != 0x00 && cp->val != 0x01)
4259 return mgmt_cmd_status(sk, hdev->id,
4260 MGMT_OP_SET_WIDEBAND_SPEECH,
4261 MGMT_STATUS_INVALID_PARAMS);
4262
4263 hci_dev_lock(hdev);
4264
4265 if (hdev_is_powered(hdev) &&
4266 !!cp->val != hci_dev_test_flag(hdev,
4267 HCI_WIDEBAND_SPEECH_ENABLED)) {
4268 err = mgmt_cmd_status(sk, hdev->id,
4269 MGMT_OP_SET_WIDEBAND_SPEECH,
4270 MGMT_STATUS_REJECTED);
4271 goto unlock;
4272 }
4273
4274 if (cp->val)
4275 changed = !hci_dev_test_and_set_flag(hdev,
4276 HCI_WIDEBAND_SPEECH_ENABLED);
4277 else
4278 changed = hci_dev_test_and_clear_flag(hdev,
4279 HCI_WIDEBAND_SPEECH_ENABLED);
4280
4281 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4282 if (err < 0)
4283 goto unlock;
4284
4285 if (changed)
4286 err = new_settings(hdev, sk);
4287
4288 unlock:
4289 hci_dev_unlock(hdev);
4290 return err;
4291 }
4292
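/* Build the Read Controller Capabilities response as a series of
 * EIR-encoded fields: security flags, maximum encryption key sizes and,
 * when the controller reports it, the LE TX power range.
 */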
4293 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4294 void *data, u16 data_len)
4295 {
4296 char buf[20];
4297 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4298 u16 cap_len = 0;
4299 u8 flags = 0;
4300 u8 tx_power_range[2];
4301
4302 bt_dev_dbg(hdev, "sock %p", sk);
4303
4304 memset(&buf, 0, sizeof(buf));
4305
4306 hci_dev_lock(hdev);
4307
4308 /* When the Read Simple Pairing Options command is supported, remote
4309 * public key validation is supported.
4310 *
4311 * Alternatively, when Microsoft extensions are available, they can
4312 * indicate support for public key validation as well.
4313 */
4314 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4315 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4316
4317 flags |= 0x02; /* Remote public key validation (LE) */
4318
4319 /* When the Read Encryption Key Size command is supported, the
4320 * encryption key size is enforced.
4321 */
4322 if (hdev->commands[20] & 0x10)
4323 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4324
4325 flags |= 0x08; /* Encryption key size enforcement (LE) */
4326
4327 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4328 &flags, 1);
4329
4330 /* When the Read Simple Pairing Options command is supported, the
4331 * maximum encryption key size information is also provided.
4332 */
4333 if (hdev->commands[41] & 0x08)
4334 cap_len = eir_append_le16(rp->cap, cap_len,
4335 MGMT_CAP_MAX_ENC_KEY_SIZE,
4336 hdev->max_enc_key_size);
4337
4338 cap_len = eir_append_le16(rp->cap, cap_len,
4339 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4340 SMP_MAX_ENC_KEY_SIZE);
4341
4342 /* Append the min/max LE TX power parameters if we were able to
4343 * fetch them from the controller.
4344 */
4345 if (hdev->commands[38] & 0x80) {
4346 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4347 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4348 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4349 tx_power_range, 2);
4350 }
4351
4352 rp->cap_len = cpu_to_le16(cap_len);
4353
4354 hci_dev_unlock(hdev);
4355
4356 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4357 rp, sizeof(*rp) + cap_len);
4358 }
4359
4360 #ifdef CONFIG_BT_FEATURE_DEBUG
4361 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4362 static const u8 debug_uuid[16] = {
4363 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4364 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4365 };
4366 #endif
4367
4368 /* 330859bc-7506-492d-9370-9a6f0614037f */
4369 static const u8 quality_report_uuid[16] = {
4370 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4371 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4372 };
4373
4374 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4375 static const u8 offload_codecs_uuid[16] = {
4376 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4377 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4378 };
4379
4380 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4381 static const u8 le_simultaneous_roles_uuid[16] = {
4382 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4383 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4384 };
4385
4386 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4387 static const u8 rpa_resolution_uuid[16] = {
4388 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4389 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4390 };
4391
4392 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4393 static const u8 iso_socket_uuid[16] = {
4394 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4395 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4396 };
4397
4398 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4399 static const u8 mgmt_mesh_uuid[16] = {
4400 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4401 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4402 };
4403
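/* Report the experimental features applicable to this controller index
 * (or to the non-controller index when hdev is NULL) as UUID/flags
 * pairs, with BIT(0) of the flags indicating whether the feature is
 * currently enabled.
 */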
4404 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4405 void *data, u16 data_len)
4406 {
4407 struct mgmt_rp_read_exp_features_info *rp;
4408 size_t len;
4409 u16 idx = 0;
4410 u32 flags;
4411 int status;
4412
4413 bt_dev_dbg(hdev, "sock %p", sk);
4414
4415 /* Enough space for 7 features */
4416 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4417 rp = kzalloc(len, GFP_KERNEL);
4418 if (!rp)
4419 return -ENOMEM;
4420
4421 #ifdef CONFIG_BT_FEATURE_DEBUG
4422 if (!hdev) {
4423 flags = bt_dbg_get() ? BIT(0) : 0;
4424
4425 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4426 rp->features[idx].flags = cpu_to_le32(flags);
4427 idx++;
4428 }
4429 #endif
4430
4431 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4432 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4433 flags = BIT(0);
4434 else
4435 flags = 0;
4436
4437 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4438 rp->features[idx].flags = cpu_to_le32(flags);
4439 idx++;
4440 }
4441
4442 if (hdev && ll_privacy_capable(hdev)) {
4443 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4444 flags = BIT(0) | BIT(1);
4445 else
4446 flags = BIT(1);
4447
4448 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4449 rp->features[idx].flags = cpu_to_le32(flags);
4450 idx++;
4451 }
4452
4453 if (hdev && (aosp_has_quality_report(hdev) ||
4454 hdev->set_quality_report)) {
4455 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4456 flags = BIT(0);
4457 else
4458 flags = 0;
4459
4460 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4461 rp->features[idx].flags = cpu_to_le32(flags);
4462 idx++;
4463 }
4464
4465 if (hdev && hdev->get_data_path_id) {
4466 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4467 flags = BIT(0);
4468 else
4469 flags = 0;
4470
4471 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4472 rp->features[idx].flags = cpu_to_le32(flags);
4473 idx++;
4474 }
4475
4476 if (IS_ENABLED(CONFIG_BT_LE)) {
4477 flags = iso_enabled() ? BIT(0) : 0;
4478 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4479 rp->features[idx].flags = cpu_to_le32(flags);
4480 idx++;
4481 }
4482
4483 if (hdev && lmp_le_capable(hdev)) {
4484 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4485 flags = BIT(0);
4486 else
4487 flags = 0;
4488
4489 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4490 rp->features[idx].flags = cpu_to_le32(flags);
4491 idx++;
4492 }
4493
4494 rp->feature_count = cpu_to_le16(idx);
4495
4496 /* After reading the experimental features information, enable
4497 * the events to update the client on any future change.
4498 */
4499 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4500
4501 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4502 MGMT_OP_READ_EXP_FEATURES_INFO,
4503 0, rp, sizeof(*rp) + (20 * idx));
4504
4505 kfree(rp);
4506 return status;
4507 }
4508
4509 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4510 struct sock *skip)
4511 {
4512 struct mgmt_ev_exp_feature_changed ev;
4513
4514 memset(&ev, 0, sizeof(ev));
4515 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4516 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4517
4518 // Do we need to be atomic with the conn_flags?
4519 if (enabled && privacy_mode_capable(hdev))
4520 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4521 else
4522 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4523
4524 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4525 &ev, sizeof(ev),
4526 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4527
4528 }
4529
4530 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4531 bool enabled, struct sock *skip)
4532 {
4533 struct mgmt_ev_exp_feature_changed ev;
4534
4535 memset(&ev, 0, sizeof(ev));
4536 memcpy(ev.uuid, uuid, 16);
4537 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4538
4539 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4540 &ev, sizeof(ev),
4541 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4542 }
4543
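/* Experimental features are dispatched by UUID: each table entry below
 * pairs a feature UUID with the handler that sets it.
 */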
4544 #define EXP_FEAT(_uuid, _set_func) \
4545 { \
4546 .uuid = _uuid, \
4547 .set_func = _set_func, \
4548 }
4549
4550 /* The zero key uuid is special. Multiple exp features are set through it. */
4551 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4552 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4553 {
4554 struct mgmt_rp_set_exp_feature rp;
4555
4556 memset(rp.uuid, 0, 16);
4557 rp.flags = cpu_to_le32(0);
4558
4559 #ifdef CONFIG_BT_FEATURE_DEBUG
4560 if (!hdev) {
4561 bool changed = bt_dbg_get();
4562
4563 bt_dbg_set(false);
4564
4565 if (changed)
4566 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4567 }
4568 #endif
4569
4570 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4571 bool changed;
4572
4573 changed = hci_dev_test_and_clear_flag(hdev,
4574 HCI_ENABLE_LL_PRIVACY);
4575 if (changed)
4576 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4577 sk);
4578 }
4579
4580 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4581
4582 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4583 MGMT_OP_SET_EXP_FEATURE, 0,
4584 &rp, sizeof(rp));
4585 }
4586
4587 #ifdef CONFIG_BT_FEATURE_DEBUG
4588 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4589 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4590 {
4591 struct mgmt_rp_set_exp_feature rp;
4592
4593 bool val, changed;
4594 int err;
4595
4596 /* Command requires use of the non-controller index */
4597 if (hdev)
4598 return mgmt_cmd_status(sk, hdev->id,
4599 MGMT_OP_SET_EXP_FEATURE,
4600 MGMT_STATUS_INVALID_INDEX);
4601
4602 /* Parameters are limited to a single octet */
4603 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4604 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4605 MGMT_OP_SET_EXP_FEATURE,
4606 MGMT_STATUS_INVALID_PARAMS);
4607
4608 /* Only boolean on/off is supported */
4609 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4610 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4611 MGMT_OP_SET_EXP_FEATURE,
4612 MGMT_STATUS_INVALID_PARAMS);
4613
4614 val = !!cp->param[0];
4615 changed = val ? !bt_dbg_get() : bt_dbg_get();
4616 bt_dbg_set(val);
4617
4618 memcpy(rp.uuid, debug_uuid, 16);
4619 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4620
4621 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4622
4623 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4624 MGMT_OP_SET_EXP_FEATURE, 0,
4625 &rp, sizeof(rp));
4626
4627 if (changed)
4628 exp_feature_changed(hdev, debug_uuid, val, sk);
4629
4630 return err;
4631 }
4632 #endif
4633
4634 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4635 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4636 {
4637 struct mgmt_rp_set_exp_feature rp;
4638 bool val, changed;
4639 int err;
4640
4641 /* Command requires use of the controller index */
4642 if (!hdev)
4643 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4644 MGMT_OP_SET_EXP_FEATURE,
4645 MGMT_STATUS_INVALID_INDEX);
4646
4647 /* Parameters are limited to a single octet */
4648 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4649 return mgmt_cmd_status(sk, hdev->id,
4650 MGMT_OP_SET_EXP_FEATURE,
4651 MGMT_STATUS_INVALID_PARAMS);
4652
4653 /* Only boolean on/off is supported */
4654 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4655 return mgmt_cmd_status(sk, hdev->id,
4656 MGMT_OP_SET_EXP_FEATURE,
4657 MGMT_STATUS_INVALID_PARAMS);
4658
4659 val = !!cp->param[0];
4660
4661 if (val) {
4662 changed = !hci_dev_test_and_set_flag(hdev,
4663 HCI_MESH_EXPERIMENTAL);
4664 } else {
4665 hci_dev_clear_flag(hdev, HCI_MESH);
4666 changed = hci_dev_test_and_clear_flag(hdev,
4667 HCI_MESH_EXPERIMENTAL);
4668 }
4669
4670 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4671 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4672
4673 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4674
4675 err = mgmt_cmd_complete(sk, hdev->id,
4676 MGMT_OP_SET_EXP_FEATURE, 0,
4677 &rp, sizeof(rp));
4678
4679 if (changed)
4680 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4681
4682 return err;
4683 }
4684
4685 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4686 struct mgmt_cp_set_exp_feature *cp,
4687 u16 data_len)
4688 {
4689 struct mgmt_rp_set_exp_feature rp;
4690 bool val, changed;
4691 int err;
4692 u32 flags;
4693
4694 /* Command requires use of the controller index */
4695 if (!hdev)
4696 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4697 MGMT_OP_SET_EXP_FEATURE,
4698 MGMT_STATUS_INVALID_INDEX);
4699
4700 /* Changes can only be made when the controller is powered down */
4701 if (hdev_is_powered(hdev))
4702 return mgmt_cmd_status(sk, hdev->id,
4703 MGMT_OP_SET_EXP_FEATURE,
4704 MGMT_STATUS_REJECTED);
4705
4706 /* Parameters are limited to a single octet */
4707 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4708 return mgmt_cmd_status(sk, hdev->id,
4709 MGMT_OP_SET_EXP_FEATURE,
4710 MGMT_STATUS_INVALID_PARAMS);
4711
4712 /* Only boolean on/off is supported */
4713 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4714 return mgmt_cmd_status(sk, hdev->id,
4715 MGMT_OP_SET_EXP_FEATURE,
4716 MGMT_STATUS_INVALID_PARAMS);
4717
4718 val = !!cp->param[0];
4719
4720 if (val) {
4721 changed = !hci_dev_test_and_set_flag(hdev,
4722 HCI_ENABLE_LL_PRIVACY);
4723 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4724
4725 /* Enable LL privacy + supported settings changed */
4726 flags = BIT(0) | BIT(1);
4727 } else {
4728 changed = hci_dev_test_and_clear_flag(hdev,
4729 HCI_ENABLE_LL_PRIVACY);
4730
4731 /* Disable LL privacy + supported settings changed */
4732 flags = BIT(1);
4733 }
4734
4735 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4736 rp.flags = cpu_to_le32(flags);
4737
4738 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4739
4740 err = mgmt_cmd_complete(sk, hdev->id,
4741 MGMT_OP_SET_EXP_FEATURE, 0,
4742 &rp, sizeof(rp));
4743
4744 if (changed)
4745 exp_ll_privacy_feature_changed(val, hdev, sk);
4746
4747 return err;
4748 }
4749
4750 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4751 struct mgmt_cp_set_exp_feature *cp,
4752 u16 data_len)
4753 {
4754 struct mgmt_rp_set_exp_feature rp;
4755 bool val, changed;
4756 int err;
4757
4758 /* Command requires use of a valid controller index */
4759 if (!hdev)
4760 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4761 MGMT_OP_SET_EXP_FEATURE,
4762 MGMT_STATUS_INVALID_INDEX);
4763
4764 /* Parameters are limited to a single octet */
4765 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4766 return mgmt_cmd_status(sk, hdev->id,
4767 MGMT_OP_SET_EXP_FEATURE,
4768 MGMT_STATUS_INVALID_PARAMS);
4769
4770 /* Only boolean on/off is supported */
4771 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4772 return mgmt_cmd_status(sk, hdev->id,
4773 MGMT_OP_SET_EXP_FEATURE,
4774 MGMT_STATUS_INVALID_PARAMS);
4775
4776 hci_req_sync_lock(hdev);
4777
4778 val = !!cp->param[0];
4779 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4780
4781 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4782 err = mgmt_cmd_status(sk, hdev->id,
4783 MGMT_OP_SET_EXP_FEATURE,
4784 MGMT_STATUS_NOT_SUPPORTED);
4785 goto unlock_quality_report;
4786 }
4787
4788 if (changed) {
4789 if (hdev->set_quality_report)
4790 err = hdev->set_quality_report(hdev, val);
4791 else
4792 err = aosp_set_quality_report(hdev, val);
4793
4794 if (err) {
4795 err = mgmt_cmd_status(sk, hdev->id,
4796 MGMT_OP_SET_EXP_FEATURE,
4797 MGMT_STATUS_FAILED);
4798 goto unlock_quality_report;
4799 }
4800
4801 if (val)
4802 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4803 else
4804 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4805 }
4806
4807 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4808
4809 memcpy(rp.uuid, quality_report_uuid, 16);
4810 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4811 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4812
4813 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4814 &rp, sizeof(rp));
4815
4816 if (changed)
4817 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4818
4819 unlock_quality_report:
4820 hci_req_sync_unlock(hdev);
4821 return err;
4822 }
4823
4824 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4825 struct mgmt_cp_set_exp_feature *cp,
4826 u16 data_len)
4827 {
4828 bool val, changed;
4829 int err;
4830 struct mgmt_rp_set_exp_feature rp;
4831
4832 /* Command requires use of a valid controller index */
4833 if (!hdev)
4834 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4835 MGMT_OP_SET_EXP_FEATURE,
4836 MGMT_STATUS_INVALID_INDEX);
4837
4838 /* Parameters are limited to a single octet */
4839 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4840 return mgmt_cmd_status(sk, hdev->id,
4841 MGMT_OP_SET_EXP_FEATURE,
4842 MGMT_STATUS_INVALID_PARAMS);
4843
4844 /* Only boolean on/off is supported */
4845 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4846 return mgmt_cmd_status(sk, hdev->id,
4847 MGMT_OP_SET_EXP_FEATURE,
4848 MGMT_STATUS_INVALID_PARAMS);
4849
4850 val = !!cp->param[0];
4851 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4852
4853 if (!hdev->get_data_path_id) {
4854 return mgmt_cmd_status(sk, hdev->id,
4855 MGMT_OP_SET_EXP_FEATURE,
4856 MGMT_STATUS_NOT_SUPPORTED);
4857 }
4858
4859 if (changed) {
4860 if (val)
4861 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4862 else
4863 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4864 }
4865
4866 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4867 val, changed);
4868
4869 memcpy(rp.uuid, offload_codecs_uuid, 16);
4870 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4871 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4872 err = mgmt_cmd_complete(sk, hdev->id,
4873 MGMT_OP_SET_EXP_FEATURE, 0,
4874 &rp, sizeof(rp));
4875
4876 if (changed)
4877 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4878
4879 return err;
4880 }
4881
4882 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4883 struct mgmt_cp_set_exp_feature *cp,
4884 u16 data_len)
4885 {
4886 bool val, changed;
4887 int err;
4888 struct mgmt_rp_set_exp_feature rp;
4889
4890 /* Command requires use of a valid controller index */
4891 if (!hdev)
4892 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4893 MGMT_OP_SET_EXP_FEATURE,
4894 MGMT_STATUS_INVALID_INDEX);
4895
4896 /* Parameters are limited to a single octet */
4897 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4898 return mgmt_cmd_status(sk, hdev->id,
4899 MGMT_OP_SET_EXP_FEATURE,
4900 MGMT_STATUS_INVALID_PARAMS);
4901
4902 /* Only boolean on/off is supported */
4903 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4904 return mgmt_cmd_status(sk, hdev->id,
4905 MGMT_OP_SET_EXP_FEATURE,
4906 MGMT_STATUS_INVALID_PARAMS);
4907
4908 val = !!cp->param[0];
4909 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4910
4911 if (!hci_dev_le_state_simultaneous(hdev)) {
4912 return mgmt_cmd_status(sk, hdev->id,
4913 MGMT_OP_SET_EXP_FEATURE,
4914 MGMT_STATUS_NOT_SUPPORTED);
4915 }
4916
4917 if (changed) {
4918 if (val)
4919 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4920 else
4921 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4922 }
4923
4924 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4925 val, changed);
4926
4927 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4928 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4929 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4930 err = mgmt_cmd_complete(sk, hdev->id,
4931 MGMT_OP_SET_EXP_FEATURE, 0,
4932 &rp, sizeof(rp));
4933
4934 if (changed)
4935 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4936
4937 return err;
4938 }
4939
4940 #ifdef CONFIG_BT_LE
4941 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4942 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4943 {
4944 struct mgmt_rp_set_exp_feature rp;
4945 bool val, changed = false;
4946 int err;
4947
4948 /* Command requires use of the non-controller index */
4949 if (hdev)
4950 return mgmt_cmd_status(sk, hdev->id,
4951 MGMT_OP_SET_EXP_FEATURE,
4952 MGMT_STATUS_INVALID_INDEX);
4953
4954 /* Parameters are limited to a single octet */
4955 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4956 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4957 MGMT_OP_SET_EXP_FEATURE,
4958 MGMT_STATUS_INVALID_PARAMS);
4959
4960 /* Only boolean on/off is supported */
4961 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4962 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4963 MGMT_OP_SET_EXP_FEATURE,
4964 MGMT_STATUS_INVALID_PARAMS);
4965
4966 val = !!cp->param[0];
4967 if (val)
4968 err = iso_init();
4969 else
4970 err = iso_exit();
4971
4972 if (!err)
4973 changed = true;
4974
4975 memcpy(rp.uuid, iso_socket_uuid, 16);
4976 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4977
4978 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4979
4980 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4981 MGMT_OP_SET_EXP_FEATURE, 0,
4982 &rp, sizeof(rp));
4983
4984 if (changed)
4985 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4986
4987 return err;
4988 }
4989 #endif
4990
4991 static const struct mgmt_exp_feature {
4992 const u8 *uuid;
4993 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4994 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4995 } exp_features[] = {
4996 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4997 #ifdef CONFIG_BT_FEATURE_DEBUG
4998 EXP_FEAT(debug_uuid, set_debug_func),
4999 #endif
5000 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5001 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5002 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5003 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5004 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5005 #ifdef CONFIG_BT_LE
5006 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5007 #endif
5008
5009 /* end with a null feature */
5010 EXP_FEAT(NULL, NULL)
5011 };
5012
5013 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5014 void *data, u16 data_len)
5015 {
5016 struct mgmt_cp_set_exp_feature *cp = data;
5017 size_t i = 0;
5018
5019 bt_dev_dbg(hdev, "sock %p", sk);
5020
5021 for (i = 0; exp_features[i].uuid; i++) {
5022 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5023 return exp_features[i].set_func(sk, hdev, cp, data_len);
5024 }
5025
5026 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5027 MGMT_OP_SET_EXP_FEATURE,
5028 MGMT_STATUS_NOT_SUPPORTED);
5029 }
5030
5031 static u32 get_params_flags(struct hci_dev *hdev,
5032 struct hci_conn_params *params)
5033 {
5034 u32 flags = hdev->conn_flags;
5035
5036 /* Devices using RPAs can only be programmed into the accept list if
5037 * LL Privacy has been enabled; otherwise they cannot set
5038 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5039 */
5040 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5041 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5042 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5043
5044 return flags;
5045 }
5046
5047 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5048 u16 data_len)
5049 {
5050 struct mgmt_cp_get_device_flags *cp = data;
5051 struct mgmt_rp_get_device_flags rp;
5052 struct bdaddr_list_with_flags *br_params;
5053 struct hci_conn_params *params;
5054 u32 supported_flags;
5055 u32 current_flags = 0;
5056 u8 status = MGMT_STATUS_INVALID_PARAMS;
5057
5058 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5059 &cp->addr.bdaddr, cp->addr.type);
5060
5061 hci_dev_lock(hdev);
5062
5063 supported_flags = hdev->conn_flags;
5064
5065 memset(&rp, 0, sizeof(rp));
5066
5067 if (cp->addr.type == BDADDR_BREDR) {
5068 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5069 &cp->addr.bdaddr,
5070 cp->addr.type);
5071 if (!br_params)
5072 goto done;
5073
5074 current_flags = br_params->flags;
5075 } else {
5076 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5077 le_addr_type(cp->addr.type));
5078 if (!params)
5079 goto done;
5080
5081 supported_flags = get_params_flags(hdev, params);
5082 current_flags = params->flags;
5083 }
5084
5085 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5086 rp.addr.type = cp->addr.type;
5087 rp.supported_flags = cpu_to_le32(supported_flags);
5088 rp.current_flags = cpu_to_le32(current_flags);
5089
5090 status = MGMT_STATUS_SUCCESS;
5091
5092 done:
5093 hci_dev_unlock(hdev);
5094
5095 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5096 &rp, sizeof(rp));
5097 }
5098
5099 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5100 bdaddr_t *bdaddr, u8 bdaddr_type,
5101 u32 supported_flags, u32 current_flags)
5102 {
5103 struct mgmt_ev_device_flags_changed ev;
5104
5105 bacpy(&ev.addr.bdaddr, bdaddr);
5106 ev.addr.type = bdaddr_type;
5107 ev.supported_flags = cpu_to_le32(supported_flags);
5108 ev.current_flags = cpu_to_le32(current_flags);
5109
5110 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5111 }
5112
5113 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5114 u16 len)
5115 {
5116 struct mgmt_cp_set_device_flags *cp = data;
5117 struct bdaddr_list_with_flags *br_params;
5118 struct hci_conn_params *params;
5119 u8 status = MGMT_STATUS_INVALID_PARAMS;
5120 u32 supported_flags;
5121 u32 current_flags = __le32_to_cpu(cp->current_flags);
5122
5123 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5124 &cp->addr.bdaddr, cp->addr.type, current_flags);
5125
5126 // We should take hci_dev_lock() early, I think... conn_flags can change
5127 supported_flags = hdev->conn_flags;
5128
5129 if ((supported_flags | current_flags) != supported_flags) {
5130 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5131 current_flags, supported_flags);
5132 goto done;
5133 }
5134
5135 hci_dev_lock(hdev);
5136
5137 if (cp->addr.type == BDADDR_BREDR) {
5138 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5139 &cp->addr.bdaddr,
5140 cp->addr.type);
5141
5142 if (br_params) {
5143 br_params->flags = current_flags;
5144 status = MGMT_STATUS_SUCCESS;
5145 } else {
5146 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5147 &cp->addr.bdaddr, cp->addr.type);
5148 }
5149
5150 goto unlock;
5151 }
5152
5153 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5154 le_addr_type(cp->addr.type));
5155 if (!params) {
5156 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5157 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5158 goto unlock;
5159 }
5160
5161 supported_flags = get_params_flags(hdev, params);
5162
5163 if ((supported_flags | current_flags) != supported_flags) {
5164 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5165 current_flags, supported_flags);
5166 goto unlock;
5167 }
5168
5169 WRITE_ONCE(params->flags, current_flags);
5170 status = MGMT_STATUS_SUCCESS;
5171
5172 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5173 * has been set.
5174 */
5175 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5176 hci_update_passive_scan(hdev);
5177
5178 unlock:
5179 hci_dev_unlock(hdev);
5180
5181 done:
5182 if (status == MGMT_STATUS_SUCCESS)
5183 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5184 supported_flags, current_flags);
5185
5186 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5187 &cp->addr, sizeof(cp->addr));
5188 }
5189
5190 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5191 u16 handle)
5192 {
5193 struct mgmt_ev_adv_monitor_added ev;
5194
5195 ev.monitor_handle = cpu_to_le16(handle);
5196
5197 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5198 }
5199
5200 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5201 __le16 handle)
5202 {
5203 struct mgmt_ev_adv_monitor_removed ev;
5204
5205 ev.monitor_handle = handle;
5206
5207 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5208 }
5209
5210 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5211 void *data, u16 len)
5212 {
5213 struct adv_monitor *monitor = NULL;
5214 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5215 int handle, err;
5216 size_t rp_size = 0;
5217 __u32 supported = 0;
5218 __u32 enabled = 0;
5219 __u16 num_handles = 0;
5220 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5221
5222 BT_DBG("request for %s", hdev->name);
5223
5224 hci_dev_lock(hdev);
5225
5226 if (msft_monitor_supported(hdev))
5227 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5228
5229 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5230 handles[num_handles++] = monitor->handle;
5231
5232 hci_dev_unlock(hdev);
5233
5234 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5235 rp = kmalloc(rp_size, GFP_KERNEL);
5236 if (!rp)
5237 return -ENOMEM;
5238
5239 /* All supported features are currently enabled */
5240 enabled = supported;
5241
5242 rp->supported_features = cpu_to_le32(supported);
5243 rp->enabled_features = cpu_to_le32(enabled);
5244 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5245 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5246 rp->num_handles = cpu_to_le16(num_handles);
5247 if (num_handles)
5248 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5249
5250 err = mgmt_cmd_complete(sk, hdev->id,
5251 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5252 MGMT_STATUS_SUCCESS, rp, rp_size);
5253
5254 kfree(rp);
5255
5256 return err;
5257 }
5258
5259 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5260 void *data, int status)
5261 {
5262 struct mgmt_rp_add_adv_patterns_monitor rp;
5263 struct mgmt_pending_cmd *cmd = data;
5264 struct adv_monitor *monitor = cmd->user_data;
5265
5266 hci_dev_lock(hdev);
5267
5268 rp.monitor_handle = cpu_to_le16(monitor->handle);
5269
5270 if (!status) {
5271 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5272 hdev->adv_monitors_cnt++;
5273 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5274 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5275 hci_update_passive_scan(hdev);
5276 }
5277
5278 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5279 mgmt_status(status), &rp, sizeof(rp));
5280 mgmt_pending_remove(cmd);
5281
5282 hci_dev_unlock(hdev);
5283 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5284 rp.monitor_handle, status);
5285 }
5286
5287 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5288 {
5289 struct mgmt_pending_cmd *cmd = data;
5290 struct adv_monitor *monitor = cmd->user_data;
5291
5292 return hci_add_adv_monitor(hdev, monitor);
5293 }
5294
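/* Common tail of the Add Adv Patterns Monitor variants: reject the
 * request while a conflicting operation is pending, otherwise queue the
 * monitor registration on the cmd_sync work. On any failure the monitor
 * is freed here.
 */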
5295 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5296 struct adv_monitor *m, u8 status,
5297 void *data, u16 len, u16 op)
5298 {
5299 struct mgmt_pending_cmd *cmd;
5300 int err;
5301
5302 hci_dev_lock(hdev);
5303
5304 if (status)
5305 goto unlock;
5306
5307 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5308 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5309 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5310 status = MGMT_STATUS_BUSY;
5311 goto unlock;
5312 }
5313
5314 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5315 if (!cmd) {
5316 status = MGMT_STATUS_NO_RESOURCES;
5317 goto unlock;
5318 }
5319
5320 cmd->user_data = m;
5321 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5322 mgmt_add_adv_patterns_monitor_complete);
5323 if (err) {
5324 if (err == -ENOMEM)
5325 status = MGMT_STATUS_NO_RESOURCES;
5326 else
5327 status = MGMT_STATUS_FAILED;
5328
5329 goto unlock;
5330 }
5331
5332 hci_dev_unlock(hdev);
5333
5334 return 0;
5335
5336 unlock:
5337 hci_free_adv_monitor(hdev, m);
5338 hci_dev_unlock(hdev);
5339 return mgmt_cmd_status(sk, hdev->id, op, status);
5340 }
5341
5342 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5343 struct mgmt_adv_rssi_thresholds *rssi)
5344 {
5345 if (rssi) {
5346 m->rssi.low_threshold = rssi->low_threshold;
5347 m->rssi.low_threshold_timeout =
5348 __le16_to_cpu(rssi->low_threshold_timeout);
5349 m->rssi.high_threshold = rssi->high_threshold;
5350 m->rssi.high_threshold_timeout =
5351 __le16_to_cpu(rssi->high_threshold_timeout);
5352 m->rssi.sampling_period = rssi->sampling_period;
5353 } else {
5354 /* Default values. These numbers are the least constraining
5355 * parameters for the MSFT API to work, so it behaves as if there
5356 * were no RSSI parameters to consider. May need to be changed
5357 * if other APIs are to be supported.
5358 */
5359 m->rssi.low_threshold = -127;
5360 m->rssi.low_threshold_timeout = 60;
5361 m->rssi.high_threshold = -127;
5362 m->rssi.high_threshold_timeout = 0;
5363 m->rssi.sampling_period = 0;
5364 }
5365 }
5366
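/* Validate each pattern against the extended advertising data limits
 * and copy it into the monitor's pattern list, returning a MGMT status
 * code.
 */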
5367 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5368 struct mgmt_adv_pattern *patterns)
5369 {
5370 u8 offset = 0, length = 0;
5371 struct adv_pattern *p = NULL;
5372 int i;
5373
5374 for (i = 0; i < pattern_count; i++) {
5375 offset = patterns[i].offset;
5376 length = patterns[i].length;
5377 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5378 length > HCI_MAX_EXT_AD_LENGTH ||
5379 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5380 return MGMT_STATUS_INVALID_PARAMS;
5381
5382 p = kmalloc(sizeof(*p), GFP_KERNEL);
5383 if (!p)
5384 return MGMT_STATUS_NO_RESOURCES;
5385
5386 p->ad_type = patterns[i].ad_type;
5387 p->offset = patterns[i].offset;
5388 p->length = patterns[i].length;
5389 memcpy(p->value, patterns[i].value, p->length);
5390
5391 INIT_LIST_HEAD(&p->list);
5392 list_add(&p->list, &m->patterns);
5393 }
5394
5395 return MGMT_STATUS_SUCCESS;
5396 }
5397
5398 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5399 void *data, u16 len)
5400 {
5401 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5402 struct adv_monitor *m = NULL;
5403 u8 status = MGMT_STATUS_SUCCESS;
5404 size_t expected_size = sizeof(*cp);
5405
5406 BT_DBG("request for %s", hdev->name);
5407
5408 if (len <= sizeof(*cp)) {
5409 status = MGMT_STATUS_INVALID_PARAMS;
5410 goto done;
5411 }
5412
5413 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5414 if (len != expected_size) {
5415 status = MGMT_STATUS_INVALID_PARAMS;
5416 goto done;
5417 }
5418
5419 m = kzalloc(sizeof(*m), GFP_KERNEL);
5420 if (!m) {
5421 status = MGMT_STATUS_NO_RESOURCES;
5422 goto done;
5423 }
5424
5425 INIT_LIST_HEAD(&m->patterns);
5426
5427 parse_adv_monitor_rssi(m, NULL);
5428 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5429
5430 done:
5431 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5432 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5433 }
5434
5435 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5436 void *data, u16 len)
5437 {
5438 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5439 struct adv_monitor *m = NULL;
5440 u8 status = MGMT_STATUS_SUCCESS;
5441 size_t expected_size = sizeof(*cp);
5442
5443 BT_DBG("request for %s", hdev->name);
5444
5445 if (len <= sizeof(*cp)) {
5446 status = MGMT_STATUS_INVALID_PARAMS;
5447 goto done;
5448 }
5449
5450 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5451 if (len != expected_size) {
5452 status = MGMT_STATUS_INVALID_PARAMS;
5453 goto done;
5454 }
5455
5456 m = kzalloc(sizeof(*m), GFP_KERNEL);
5457 if (!m) {
5458 status = MGMT_STATUS_NO_RESOURCES;
5459 goto done;
5460 }
5461
5462 INIT_LIST_HEAD(&m->patterns);
5463
5464 parse_adv_monitor_rssi(m, &cp->rssi);
5465 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5466
5467 done:
5468 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5469 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5470 }
5471
5472 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5473 void *data, int status)
5474 {
5475 struct mgmt_rp_remove_adv_monitor rp;
5476 struct mgmt_pending_cmd *cmd = data;
5477 struct mgmt_cp_remove_adv_monitor *cp;
5478
5479 if (status == -ECANCELED)
5480 return;
5481
5482 hci_dev_lock(hdev);
5483
5484 cp = cmd->param;
5485
5486 rp.monitor_handle = cp->monitor_handle;
5487
5488 if (!status) {
5489 mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5490 hci_update_passive_scan(hdev);
5491 }
5492
5493 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5494 mgmt_status(status), &rp, sizeof(rp));
5495 mgmt_pending_free(cmd);
5496
5497 hci_dev_unlock(hdev);
5498 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5499 rp.monitor_handle, status);
5500 }
5501
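/* A monitor handle of 0 acts as a wildcard and removes all monitors;
 * any other value removes only the matching monitor.
 */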
5502 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5503 {
5504 struct mgmt_pending_cmd *cmd = data;
5505 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5506 u16 handle = __le16_to_cpu(cp->monitor_handle);
5507
5508 if (!handle)
5509 return hci_remove_all_adv_monitor(hdev);
5510
5511 return hci_remove_single_adv_monitor(hdev, handle);
5512 }
5513
5514 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5515 void *data, u16 len)
5516 {
5517 struct mgmt_pending_cmd *cmd;
5518 int err, status;
5519
5520 hci_dev_lock(hdev);
5521
5522 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5523 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5524 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5525 status = MGMT_STATUS_BUSY;
5526 goto unlock;
5527 }
5528
5529 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5530 if (!cmd) {
5531 status = MGMT_STATUS_NO_RESOURCES;
5532 goto unlock;
5533 }
5534
5535 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5536 mgmt_remove_adv_monitor_complete);
5537
5538 if (err) {
5539 mgmt_pending_free(cmd);
5540
5541 if (err == -ENOMEM)
5542 status = MGMT_STATUS_NO_RESOURCES;
5543 else
5544 status = MGMT_STATUS_FAILED;
5545
5546 goto unlock;
5547 }
5548
5549 hci_dev_unlock(hdev);
5550
5551 return 0;
5552
5553 unlock:
5554 hci_dev_unlock(hdev);
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5556 status);
5557 }
5558
5559 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5560 {
5561 struct mgmt_rp_read_local_oob_data mgmt_rp;
5562 size_t rp_size = sizeof(mgmt_rp);
5563 struct mgmt_pending_cmd *cmd = data;
5564 struct sk_buff *skb = cmd->skb;
5565 u8 status = mgmt_status(err);
5566
5567 if (!status) {
5568 if (!skb)
5569 status = MGMT_STATUS_FAILED;
5570 else if (IS_ERR(skb))
5571 status = mgmt_status(PTR_ERR(skb));
5572 else
5573 status = mgmt_status(skb->data[0]);
5574 }
5575
5576 bt_dev_dbg(hdev, "status %d", status);
5577
5578 if (status) {
5579 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5580 goto remove;
5581 }
5582
5583 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5584
5585 if (!bredr_sc_enabled(hdev)) {
5586 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5587
5588 if (skb->len < sizeof(*rp)) {
5589 mgmt_cmd_status(cmd->sk, hdev->id,
5590 MGMT_OP_READ_LOCAL_OOB_DATA,
5591 MGMT_STATUS_FAILED);
5592 goto remove;
5593 }
5594
5595 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5596 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5597
5598 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5599 } else {
5600 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5601
5602 if (skb->len < sizeof(*rp)) {
5603 mgmt_cmd_status(cmd->sk, hdev->id,
5604 MGMT_OP_READ_LOCAL_OOB_DATA,
5605 MGMT_STATUS_FAILED);
5606 goto remove;
5607 }
5608
5609 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5610 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5611
5612 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5613 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5614 }
5615
5616 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5617 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5618
5619 remove:
5620 if (skb && !IS_ERR(skb))
5621 kfree_skb(skb);
5622
5623 mgmt_pending_free(cmd);
5624 }
5625
5626 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5627 {
5628 struct mgmt_pending_cmd *cmd = data;
5629
5630 if (bredr_sc_enabled(hdev))
5631 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5632 else
5633 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5634
5635 if (IS_ERR(cmd->skb))
5636 return PTR_ERR(cmd->skb);
5637 else
5638 return 0;
5639 }
5640
5641 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5642 void *data, u16 data_len)
5643 {
5644 struct mgmt_pending_cmd *cmd;
5645 int err;
5646
5647 bt_dev_dbg(hdev, "sock %p", sk);
5648
5649 hci_dev_lock(hdev);
5650
5651 if (!hdev_is_powered(hdev)) {
5652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5653 MGMT_STATUS_NOT_POWERED);
5654 goto unlock;
5655 }
5656
5657 if (!lmp_ssp_capable(hdev)) {
5658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659 MGMT_STATUS_NOT_SUPPORTED);
5660 goto unlock;
5661 }
5662
5663 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5664 if (!cmd)
5665 err = -ENOMEM;
5666 else
5667 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5668 read_local_oob_data_complete);
5669
5670 if (err < 0) {
5671 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5672 MGMT_STATUS_FAILED);
5673
5674 if (cmd)
5675 mgmt_pending_free(cmd);
5676 }
5677
5678 unlock:
5679 hci_dev_unlock(hdev);
5680 return err;
5681 }
5682
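/* Two command layouts are accepted: the legacy form carrying only the
 * P-192 hash/randomizer (BR/EDR addresses only) and the extended form
 * carrying both P-192 and P-256 values. All-zero values disable OOB
 * data for the corresponding curve.
 */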
5683 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5684 void *data, u16 len)
5685 {
5686 struct mgmt_addr_info *addr = data;
5687 int err;
5688
5689 bt_dev_dbg(hdev, "sock %p", sk);
5690
5691 if (!bdaddr_type_is_valid(addr->type))
5692 return mgmt_cmd_complete(sk, hdev->id,
5693 MGMT_OP_ADD_REMOTE_OOB_DATA,
5694 MGMT_STATUS_INVALID_PARAMS,
5695 addr, sizeof(*addr));
5696
5697 hci_dev_lock(hdev);
5698
5699 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5700 struct mgmt_cp_add_remote_oob_data *cp = data;
5701 u8 status;
5702
5703 if (cp->addr.type != BDADDR_BREDR) {
5704 err = mgmt_cmd_complete(sk, hdev->id,
5705 MGMT_OP_ADD_REMOTE_OOB_DATA,
5706 MGMT_STATUS_INVALID_PARAMS,
5707 &cp->addr, sizeof(cp->addr));
5708 goto unlock;
5709 }
5710
5711 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5712 cp->addr.type, cp->hash,
5713 cp->rand, NULL, NULL);
5714 if (err < 0)
5715 status = MGMT_STATUS_FAILED;
5716 else
5717 status = MGMT_STATUS_SUCCESS;
5718
5719 err = mgmt_cmd_complete(sk, hdev->id,
5720 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5721 &cp->addr, sizeof(cp->addr));
5722 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5723 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5724 u8 *rand192, *hash192, *rand256, *hash256;
5725 u8 status;
5726
5727 if (bdaddr_type_is_le(cp->addr.type)) {
5728 /* Enforce zero-valued 192-bit parameters as
5729 * long as legacy SMP OOB isn't implemented.
5730 */
5731 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5732 memcmp(cp->hash192, ZERO_KEY, 16)) {
5733 err = mgmt_cmd_complete(sk, hdev->id,
5734 MGMT_OP_ADD_REMOTE_OOB_DATA,
5735 MGMT_STATUS_INVALID_PARAMS,
5736 addr, sizeof(*addr));
5737 goto unlock;
5738 }
5739
5740 rand192 = NULL;
5741 hash192 = NULL;
5742 } else {
5743 /* In case one of the P-192 values is set to zero,
5744 * then just disable OOB data for P-192.
5745 */
5746 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5747 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5748 rand192 = NULL;
5749 hash192 = NULL;
5750 } else {
5751 rand192 = cp->rand192;
5752 hash192 = cp->hash192;
5753 }
5754 }
5755
5756 /* In case one of the P-256 values is set to zero, then just
5757 * disable OOB data for P-256.
5758 */
5759 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5760 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5761 rand256 = NULL;
5762 hash256 = NULL;
5763 } else {
5764 rand256 = cp->rand256;
5765 hash256 = cp->hash256;
5766 }
5767
5768 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5769 cp->addr.type, hash192, rand192,
5770 hash256, rand256);
5771 if (err < 0)
5772 status = MGMT_STATUS_FAILED;
5773 else
5774 status = MGMT_STATUS_SUCCESS;
5775
5776 err = mgmt_cmd_complete(sk, hdev->id,
5777 MGMT_OP_ADD_REMOTE_OOB_DATA,
5778 status, &cp->addr, sizeof(cp->addr));
5779 } else {
5780 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5781 len);
5782 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5783 MGMT_STATUS_INVALID_PARAMS);
5784 }
5785
5786 unlock:
5787 hci_dev_unlock(hdev);
5788 return err;
5789 }
5790
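/* Passing BDADDR_ANY as the address clears all stored remote OOB data. */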
5791 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5792 void *data, u16 len)
5793 {
5794 struct mgmt_cp_remove_remote_oob_data *cp = data;
5795 u8 status;
5796 int err;
5797
5798 bt_dev_dbg(hdev, "sock %p", sk);
5799
5800 if (cp->addr.type != BDADDR_BREDR)
5801 return mgmt_cmd_complete(sk, hdev->id,
5802 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5803 MGMT_STATUS_INVALID_PARAMS,
5804 &cp->addr, sizeof(cp->addr));
5805
5806 hci_dev_lock(hdev);
5807
5808 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5809 hci_remote_oob_data_clear(hdev);
5810 status = MGMT_STATUS_SUCCESS;
5811 goto done;
5812 }
5813
5814 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5815 if (err < 0)
5816 status = MGMT_STATUS_INVALID_PARAMS;
5817 else
5818 status = MGMT_STATUS_SUCCESS;
5819
5820 done:
5821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5822 status, &cp->addr, sizeof(cp->addr));
5823
5824 hci_dev_unlock(hdev);
5825 return err;
5826 }
5827
5828 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5829 {
5830 struct mgmt_pending_cmd *cmd;
5831
5832 bt_dev_dbg(hdev, "status %u", status);
5833
5834 hci_dev_lock(hdev);
5835
5836 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5837 if (!cmd)
5838 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5839
5840 if (!cmd)
5841 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5842
5843 if (cmd) {
5844 cmd->cmd_complete(cmd, mgmt_status(status));
5845 mgmt_pending_remove(cmd);
5846 }
5847
5848 hci_dev_unlock(hdev);
5849 }
5850
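/* Validate the requested discovery type against the controller
 * capabilities; on failure, the mgmt status to return is written to
 * *mgmt_status.
 */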
5851 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5852 uint8_t *mgmt_status)
5853 {
5854 switch (type) {
5855 case DISCOV_TYPE_LE:
5856 *mgmt_status = mgmt_le_support(hdev);
5857 if (*mgmt_status)
5858 return false;
5859 break;
5860 case DISCOV_TYPE_INTERLEAVED:
5861 *mgmt_status = mgmt_le_support(hdev);
5862 if (*mgmt_status)
5863 return false;
5864 fallthrough;
5865 case DISCOV_TYPE_BREDR:
5866 *mgmt_status = mgmt_bredr_support(hdev);
5867 if (*mgmt_status)
5868 return false;
5869 break;
5870 default:
5871 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5872 return false;
5873 }
5874
5875 return true;
5876 }
5877
5878 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5879 {
5880 struct mgmt_pending_cmd *cmd = data;
5881
5882 bt_dev_dbg(hdev, "err %d", err);
5883
5884 if (err == -ECANCELED)
5885 return;
5886
5887 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5888 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5889 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5890 return;
5891
5892 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5893 cmd->param, 1);
5894 mgmt_pending_remove(cmd);
5895
5896 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5897 DISCOVERY_FINDING);
5898 }
5899
5900 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5901 {
5902 return hci_start_discovery_sync(hdev);
5903 }
5904
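/* Common implementation backing both Start Discovery and Start Limited
 * Discovery; the opcode is passed in so replies carry the right one.
 */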
5905 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5906 u16 op, void *data, u16 len)
5907 {
5908 struct mgmt_cp_start_discovery *cp = data;
5909 struct mgmt_pending_cmd *cmd;
5910 u8 status;
5911 int err;
5912
5913 bt_dev_dbg(hdev, "sock %p", sk);
5914
5915 hci_dev_lock(hdev);
5916
5917 if (!hdev_is_powered(hdev)) {
5918 err = mgmt_cmd_complete(sk, hdev->id, op,
5919 MGMT_STATUS_NOT_POWERED,
5920 &cp->type, sizeof(cp->type));
5921 goto failed;
5922 }
5923
5924 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5925 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5926 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5927 &cp->type, sizeof(cp->type));
5928 goto failed;
5929 }
5930
5931 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5932 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5933 &cp->type, sizeof(cp->type));
5934 goto failed;
5935 }
5936
5937 /* Can't start discovery when it is paused */
5938 if (hdev->discovery_paused) {
5939 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5940 &cp->type, sizeof(cp->type));
5941 goto failed;
5942 }
5943
5944 /* Clear the discovery filter first to free any previously
5945 * allocated memory for the UUID list.
5946 */
5947 hci_discovery_filter_clear(hdev);
5948
5949 hdev->discovery.type = cp->type;
5950 hdev->discovery.report_invalid_rssi = false;
5951 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5952 hdev->discovery.limited = true;
5953 else
5954 hdev->discovery.limited = false;
5955
5956 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5957 if (!cmd) {
5958 err = -ENOMEM;
5959 goto failed;
5960 }
5961
5962 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5963 start_discovery_complete);
5964 if (err < 0) {
5965 mgmt_pending_remove(cmd);
5966 goto failed;
5967 }
5968
5969 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5970
5971 failed:
5972 hci_dev_unlock(hdev);
5973 return err;
5974 }
5975
5976 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5977 void *data, u16 len)
5978 {
5979 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5980 data, len);
5981 }
5982
5983 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5984 void *data, u16 len)
5985 {
5986 return start_discovery_internal(sk, hdev,
5987 MGMT_OP_START_LIMITED_DISCOVERY,
5988 data, len);
5989 }
5990
5991 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5992 void *data, u16 len)
5993 {
5994 struct mgmt_cp_start_service_discovery *cp = data;
5995 struct mgmt_pending_cmd *cmd;
5996 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5997 u16 uuid_count, expected_len;
5998 u8 status;
5999 int err;
6000
6001 bt_dev_dbg(hdev, "sock %p", sk);
6002
6003 hci_dev_lock(hdev);
6004
6005 if (!hdev_is_powered(hdev)) {
6006 err = mgmt_cmd_complete(sk, hdev->id,
6007 MGMT_OP_START_SERVICE_DISCOVERY,
6008 MGMT_STATUS_NOT_POWERED,
6009 &cp->type, sizeof(cp->type));
6010 goto failed;
6011 }
6012
6013 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6014 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6015 err = mgmt_cmd_complete(sk, hdev->id,
6016 MGMT_OP_START_SERVICE_DISCOVERY,
6017 MGMT_STATUS_BUSY, &cp->type,
6018 sizeof(cp->type));
6019 goto failed;
6020 }
6021
6022 if (hdev->discovery_paused) {
6023 err = mgmt_cmd_complete(sk, hdev->id,
6024 MGMT_OP_START_SERVICE_DISCOVERY,
6025 MGMT_STATUS_BUSY, &cp->type,
6026 sizeof(cp->type));
6027 goto failed;
6028 }
6029
6030 uuid_count = __le16_to_cpu(cp->uuid_count);
6031 if (uuid_count > max_uuid_count) {
6032 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6033 uuid_count);
6034 err = mgmt_cmd_complete(sk, hdev->id,
6035 MGMT_OP_START_SERVICE_DISCOVERY,
6036 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6037 sizeof(cp->type));
6038 goto failed;
6039 }
6040
6041 expected_len = sizeof(*cp) + uuid_count * 16;
6042 if (expected_len != len) {
6043 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6044 expected_len, len);
6045 err = mgmt_cmd_complete(sk, hdev->id,
6046 MGMT_OP_START_SERVICE_DISCOVERY,
6047 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6048 sizeof(cp->type));
6049 goto failed;
6050 }
6051
6052 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6053 err = mgmt_cmd_complete(sk, hdev->id,
6054 MGMT_OP_START_SERVICE_DISCOVERY,
6055 status, &cp->type, sizeof(cp->type));
6056 goto failed;
6057 }
6058
6059 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6060 hdev, data, len);
6061 if (!cmd) {
6062 err = -ENOMEM;
6063 goto failed;
6064 }
6065
6066 /* Clear the discovery filter first to free any previously
6067 * allocated memory for the UUID list.
6068 */
6069 hci_discovery_filter_clear(hdev);
6070
6071 hdev->discovery.result_filtering = true;
6072 hdev->discovery.type = cp->type;
6073 hdev->discovery.rssi = cp->rssi;
6074 hdev->discovery.uuid_count = uuid_count;
6075
6076 if (uuid_count > 0) {
6077 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6078 GFP_KERNEL);
6079 if (!hdev->discovery.uuids) {
6080 err = mgmt_cmd_complete(sk, hdev->id,
6081 MGMT_OP_START_SERVICE_DISCOVERY,
6082 MGMT_STATUS_FAILED,
6083 &cp->type, sizeof(cp->type));
6084 mgmt_pending_remove(cmd);
6085 goto failed;
6086 }
6087 }
6088
6089 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6090 start_discovery_complete);
6091 if (err < 0) {
6092 mgmt_pending_remove(cmd);
6093 goto failed;
6094 }
6095
6096 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6097
6098 failed:
6099 hci_dev_unlock(hdev);
6100 return err;
6101 }
6102
6103 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6104 {
6105 struct mgmt_pending_cmd *cmd;
6106
6107 bt_dev_dbg(hdev, "status %u", status);
6108
6109 hci_dev_lock(hdev);
6110
6111 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6112 if (cmd) {
6113 cmd->cmd_complete(cmd, mgmt_status(status));
6114 mgmt_pending_remove(cmd);
6115 }
6116
6117 hci_dev_unlock(hdev);
6118 }
6119
6120 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6121 {
6122 struct mgmt_pending_cmd *cmd = data;
6123
6124 if (err == -ECANCELED ||
6125 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6126 return;
6127
6128 bt_dev_dbg(hdev, "err %d", err);
6129
6130 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6131 cmd->param, 1);
6132 mgmt_pending_remove(cmd);
6133
6134 if (!err)
6135 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6136 }
6137
6138 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6139 {
6140 return hci_stop_discovery_sync(hdev);
6141 }
6142
6143 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6144 u16 len)
6145 {
6146 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6147 struct mgmt_pending_cmd *cmd;
6148 int err;
6149
6150 bt_dev_dbg(hdev, "sock %p", sk);
6151
6152 hci_dev_lock(hdev);
6153
6154 if (!hci_discovery_active(hdev)) {
6155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6156 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6157 sizeof(mgmt_cp->type));
6158 goto unlock;
6159 }
6160
6161 if (hdev->discovery.type != mgmt_cp->type) {
6162 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6163 MGMT_STATUS_INVALID_PARAMS,
6164 &mgmt_cp->type, sizeof(mgmt_cp->type));
6165 goto unlock;
6166 }
6167
6168 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6169 if (!cmd) {
6170 err = -ENOMEM;
6171 goto unlock;
6172 }
6173
6174 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6175 stop_discovery_complete);
6176 if (err < 0) {
6177 mgmt_pending_remove(cmd);
6178 goto unlock;
6179 }
6180
6181 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6182
6183 unlock:
6184 hci_dev_unlock(hdev);
6185 return err;
6186 }
6187
6188 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6189 u16 len)
6190 {
6191 struct mgmt_cp_confirm_name *cp = data;
6192 struct inquiry_entry *e;
6193 int err;
6194
6195 bt_dev_dbg(hdev, "sock %p", sk);
6196
6197 hci_dev_lock(hdev);
6198
6199 if (!hci_discovery_active(hdev)) {
6200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6201 MGMT_STATUS_FAILED, &cp->addr,
6202 sizeof(cp->addr));
6203 goto failed;
6204 }
6205
6206 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6207 if (!e) {
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6209 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6210 sizeof(cp->addr));
6211 goto failed;
6212 }
6213
6214 if (cp->name_known) {
6215 e->name_state = NAME_KNOWN;
6216 list_del(&e->list);
6217 } else {
6218 e->name_state = NAME_NEEDED;
6219 hci_inquiry_cache_update_resolve(hdev, e);
6220 }
6221
6222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6223 &cp->addr, sizeof(cp->addr));
6224
6225 failed:
6226 hci_dev_unlock(hdev);
6227 return err;
6228 }
6229
6230 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6231 u16 len)
6232 {
6233 struct mgmt_cp_block_device *cp = data;
6234 u8 status;
6235 int err;
6236
6237 bt_dev_dbg(hdev, "sock %p", sk);
6238
6239 if (!bdaddr_type_is_valid(cp->addr.type))
6240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6241 MGMT_STATUS_INVALID_PARAMS,
6242 &cp->addr, sizeof(cp->addr));
6243
6244 hci_dev_lock(hdev);
6245
6246 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6247 cp->addr.type);
6248 if (err < 0) {
6249 status = MGMT_STATUS_FAILED;
6250 goto done;
6251 }
6252
6253 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6254 sk);
6255 status = MGMT_STATUS_SUCCESS;
6256
6257 done:
6258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6259 &cp->addr, sizeof(cp->addr));
6260
6261 hci_dev_unlock(hdev);
6262
6263 return err;
6264 }
6265
6266 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6267 u16 len)
6268 {
6269 struct mgmt_cp_unblock_device *cp = data;
6270 u8 status;
6271 int err;
6272
6273 bt_dev_dbg(hdev, "sock %p", sk);
6274
6275 if (!bdaddr_type_is_valid(cp->addr.type))
6276 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6277 MGMT_STATUS_INVALID_PARAMS,
6278 &cp->addr, sizeof(cp->addr));
6279
6280 hci_dev_lock(hdev);
6281
6282 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6283 cp->addr.type);
6284 if (err < 0) {
6285 status = MGMT_STATUS_INVALID_PARAMS;
6286 goto done;
6287 }
6288
6289 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6290 sk);
6291 status = MGMT_STATUS_SUCCESS;
6292
6293 done:
6294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6295 &cp->addr, sizeof(cp->addr));
6296
6297 hci_dev_unlock(hdev);
6298
6299 return err;
6300 }
6301
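/* The Device ID is carried in the EIR data, so refreshing the EIR is
 * all that is needed once the identifiers have been updated.
 */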
6302 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6303 {
6304 return hci_update_eir_sync(hdev);
6305 }
6306
6307 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6308 u16 len)
6309 {
6310 struct mgmt_cp_set_device_id *cp = data;
6311 int err;
6312 __u16 source;
6313
6314 bt_dev_dbg(hdev, "sock %p", sk);
6315
6316 source = __le16_to_cpu(cp->source);
6317
6318 if (source > 0x0002)
6319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6320 MGMT_STATUS_INVALID_PARAMS);
6321
6322 hci_dev_lock(hdev);
6323
6324 hdev->devid_source = source;
6325 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6326 hdev->devid_product = __le16_to_cpu(cp->product);
6327 hdev->devid_version = __le16_to_cpu(cp->version);
6328
6329 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6330 NULL, 0);
6331
6332 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6333
6334 hci_dev_unlock(hdev);
6335
6336 return err;
6337 }
6338
6339 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6340 {
6341 if (err)
6342 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6343 else
6344 bt_dev_dbg(hdev, "status %d", err);
6345 }
6346
6347 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6348 {
6349 struct cmd_lookup match = { NULL, hdev };
6350 u8 instance;
6351 struct adv_info *adv_instance;
6352 u8 status = mgmt_status(err);
6353
6354 if (status) {
6355 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6356 cmd_status_rsp, &status);
6357 return;
6358 }
6359
6360 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6361 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6362 else
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6364
6365 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6366 &match);
6367
6368 new_settings(hdev, match.sk);
6369
6370 if (match.sk)
6371 sock_put(match.sk);
6372
6373 /* If "Set Advertising" was just disabled and instance advertising was
6374 * set up earlier, then re-enable multi-instance advertising.
6375 */
6376 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6377 list_empty(&hdev->adv_instances))
6378 return;
6379
6380 instance = hdev->cur_adv_instance;
6381 if (!instance) {
6382 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6383 struct adv_info, list);
6384 if (!adv_instance)
6385 return;
6386
6387 instance = adv_instance->instance;
6388 }
6389
6390 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6391
6392 enable_advertising_instance(hdev, err);
6393 }
6394
6395 static int set_adv_sync(struct hci_dev *hdev, void *data)
6396 {
6397 struct mgmt_pending_cmd *cmd = data;
6398 struct mgmt_mode *cp = cmd->param;
6399 u8 val = !!cp->val;
6400
6401 if (cp->val == 0x02)
6402 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6403 else
6404 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6405
6406 cancel_adv_timeout(hdev);
6407
6408 if (val) {
6409 /* Switch to instance "0" for the Set Advertising setting.
6410 * We cannot use update_[adv|scan_rsp]_data() here as the
6411 * HCI_ADVERTISING flag is not yet set.
6412 */
6413 hdev->cur_adv_instance = 0x00;
6414
6415 if (ext_adv_capable(hdev)) {
6416 hci_start_ext_adv_sync(hdev, 0x00);
6417 } else {
6418 hci_update_adv_data_sync(hdev, 0x00);
6419 hci_update_scan_rsp_data_sync(hdev, 0x00);
6420 hci_enable_advertising_sync(hdev);
6421 }
6422 } else {
6423 hci_disable_advertising_sync(hdev);
6424 }
6425
6426 return 0;
6427 }
6428
6429 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6430 u16 len)
6431 {
6432 struct mgmt_mode *cp = data;
6433 struct mgmt_pending_cmd *cmd;
6434 u8 val, status;
6435 int err;
6436
6437 bt_dev_dbg(hdev, "sock %p", sk);
6438
6439 status = mgmt_le_support(hdev);
6440 if (status)
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6442 status);
6443
6444 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 MGMT_STATUS_INVALID_PARAMS);
6447
6448 if (hdev->advertising_paused)
6449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6450 MGMT_STATUS_BUSY);
6451
6452 hci_dev_lock(hdev);
6453
6454 val = !!cp->val;
6455
6456 /* The following conditions mean that we should not do any
6457 * HCI communication but instead directly send a mgmt
6458 * response to user space (after toggling the flag if
6459 * necessary).
6460 */
6461 if (!hdev_is_powered(hdev) ||
6462 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6463 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6464 hci_dev_test_flag(hdev, HCI_MESH) ||
6465 hci_conn_num(hdev, LE_LINK) > 0 ||
6466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6468 bool changed;
6469
6470 if (cp->val) {
6471 hdev->cur_adv_instance = 0x00;
6472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6473 if (cp->val == 0x02)
6474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 else
6476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6477 } else {
6478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 }
6481
6482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6483 if (err < 0)
6484 goto unlock;
6485
6486 if (changed)
6487 err = new_settings(hdev, sk);
6488
6489 goto unlock;
6490 }
6491
6492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6493 pending_find(MGMT_OP_SET_LE, hdev)) {
6494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6495 MGMT_STATUS_BUSY);
6496 goto unlock;
6497 }
6498
6499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6500 if (!cmd)
6501 err = -ENOMEM;
6502 else
6503 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6504 set_advertising_complete);
6505
6506 if (err < 0 && cmd)
6507 mgmt_pending_remove(cmd);
6508
6509 unlock:
6510 hci_dev_unlock(hdev);
6511 return err;
6512 }
6513
6514 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6515 void *data, u16 len)
6516 {
6517 struct mgmt_cp_set_static_address *cp = data;
6518 int err;
6519
6520 bt_dev_dbg(hdev, "sock %p", sk);
6521
6522 if (!lmp_le_capable(hdev))
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_NOT_SUPPORTED);
6525
6526 if (hdev_is_powered(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528 MGMT_STATUS_REJECTED);
6529
6530 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6531 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6532 return mgmt_cmd_status(sk, hdev->id,
6533 MGMT_OP_SET_STATIC_ADDRESS,
6534 MGMT_STATUS_INVALID_PARAMS);
6535
6536 /* Two most significant bits shall be set */
6537 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6538 return mgmt_cmd_status(sk, hdev->id,
6539 MGMT_OP_SET_STATIC_ADDRESS,
6540 MGMT_STATUS_INVALID_PARAMS);
6541 }
6542
6543 hci_dev_lock(hdev);
6544
6545 bacpy(&hdev->static_addr, &cp->bdaddr);
6546
6547 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6548 if (err < 0)
6549 goto unlock;
6550
6551 err = new_settings(hdev, sk);
6552
6553 unlock:
6554 hci_dev_unlock(hdev);
6555 return err;
6556 }
6557
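/* Scan interval and window are in units of 0.625 ms; the accepted
 * range 0x0004-0x4000 mirrors the HCI LE Set Scan Parameters command.
 */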
6558 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6559 void *data, u16 len)
6560 {
6561 struct mgmt_cp_set_scan_params *cp = data;
6562 __u16 interval, window;
6563 int err;
6564
6565 bt_dev_dbg(hdev, "sock %p", sk);
6566
6567 if (!lmp_le_capable(hdev))
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569 MGMT_STATUS_NOT_SUPPORTED);
6570
6571 /* Keep allowed ranges in sync with set_mesh() */
6572 interval = __le16_to_cpu(cp->interval);
6573
6574 if (interval < 0x0004 || interval > 0x4000)
6575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6576 MGMT_STATUS_INVALID_PARAMS);
6577
6578 window = __le16_to_cpu(cp->window);
6579
6580 if (window < 0x0004 || window > 0x4000)
6581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6582 MGMT_STATUS_INVALID_PARAMS);
6583
6584 if (window > interval)
6585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6586 MGMT_STATUS_INVALID_PARAMS);
6587
6588 hci_dev_lock(hdev);
6589
6590 hdev->le_scan_interval = interval;
6591 hdev->le_scan_window = window;
6592
6593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6594 NULL, 0);
6595
6596 /* If background scan is running, restart it so new parameters are
6597 * loaded.
6598 */
6599 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6600 hdev->discovery.state == DISCOVERY_STOPPED)
6601 hci_update_passive_scan(hdev);
6602
6603 hci_dev_unlock(hdev);
6604
6605 return err;
6606 }
6607
6608 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6609 {
6610 struct mgmt_pending_cmd *cmd = data;
6611
6612 bt_dev_dbg(hdev, "err %d", err);
6613
6614 if (err) {
6615 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6616 mgmt_status(err));
6617 } else {
6618 struct mgmt_mode *cp = cmd->param;
6619
6620 if (cp->val)
6621 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6622 else
6623 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6624
6625 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6626 new_settings(hdev, cmd->sk);
6627 }
6628
6629 mgmt_pending_free(cmd);
6630 }
6631
6632 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6633 {
6634 struct mgmt_pending_cmd *cmd = data;
6635 struct mgmt_mode *cp = cmd->param;
6636
6637 return hci_write_fast_connectable_sync(hdev, cp->val);
6638 }
6639
6640 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6641 void *data, u16 len)
6642 {
6643 struct mgmt_mode *cp = data;
6644 struct mgmt_pending_cmd *cmd;
6645 int err;
6646
6647 bt_dev_dbg(hdev, "sock %p", sk);
6648
6649 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6650 hdev->hci_ver < BLUETOOTH_VER_1_2)
6651 return mgmt_cmd_status(sk, hdev->id,
6652 MGMT_OP_SET_FAST_CONNECTABLE,
6653 MGMT_STATUS_NOT_SUPPORTED);
6654
6655 if (cp->val != 0x00 && cp->val != 0x01)
6656 return mgmt_cmd_status(sk, hdev->id,
6657 MGMT_OP_SET_FAST_CONNECTABLE,
6658 MGMT_STATUS_INVALID_PARAMS);
6659
6660 hci_dev_lock(hdev);
6661
6662 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6663 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6664 goto unlock;
6665 }
6666
6667 if (!hdev_is_powered(hdev)) {
6668 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6669 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6670 new_settings(hdev, sk);
6671 goto unlock;
6672 }
6673
6674 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6675 len);
6676 if (!cmd)
6677 err = -ENOMEM;
6678 else
6679 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6680 fast_connectable_complete);
6681
6682 if (err < 0) {
6683 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6684 MGMT_STATUS_FAILED);
6685
6686 if (cmd)
6687 mgmt_pending_free(cmd);
6688 }
6689
6690 unlock:
6691 hci_dev_unlock(hdev);
6692
6693 return err;
6694 }
6695
6696 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6697 {
6698 struct mgmt_pending_cmd *cmd = data;
6699
6700 bt_dev_dbg(hdev, "err %d", err);
6701
6702 if (err) {
6703 u8 mgmt_err = mgmt_status(err);
6704
6705 /* We need to restore the flag if related HCI commands
6706 * failed.
6707 */
6708 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6709
6710 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6711 } else {
6712 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6713 new_settings(hdev, cmd->sk);
6714 }
6715
6716 mgmt_pending_free(cmd);
6717 }
6718
6719 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6720 {
6721 int status;
6722
6723 status = hci_write_fast_connectable_sync(hdev, false);
6724
6725 if (!status)
6726 status = hci_update_scan_sync(hdev);
6727
6728 /* Since only the advertising data flags will change, there
6729 * is no need to update the scan response data.
6730 */
6731 if (!status)
6732 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6733
6734 return status;
6735 }
6736
6737 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6738 {
6739 struct mgmt_mode *cp = data;
6740 struct mgmt_pending_cmd *cmd;
6741 int err;
6742
6743 bt_dev_dbg(hdev, "sock %p", sk);
6744
6745 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6747 MGMT_STATUS_NOT_SUPPORTED);
6748
6749 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6751 MGMT_STATUS_REJECTED);
6752
6753 if (cp->val != 0x00 && cp->val != 0x01)
6754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6755 MGMT_STATUS_INVALID_PARAMS);
6756
6757 hci_dev_lock(hdev);
6758
6759 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6760 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6761 goto unlock;
6762 }
6763
6764 if (!hdev_is_powered(hdev)) {
6765 if (!cp->val) {
6766 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6767 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6768 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6769 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6770 }
6771
6772 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6773
6774 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6775 if (err < 0)
6776 goto unlock;
6777
6778 err = new_settings(hdev, sk);
6779 goto unlock;
6780 }
6781
6782 /* Reject disabling when powered on */
6783 if (!cp->val) {
6784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6785 MGMT_STATUS_REJECTED);
6786 goto unlock;
6787 } else {
6788 /* When configuring a dual-mode controller to operate
6789 * with LE only and using a static address, then switching
6790 * BR/EDR back on is not allowed.
6791 *
6792 * Dual-mode controllers shall operate with the public
6793 * address as its identity address for BR/EDR and LE. So
6794 * reject the attempt to create an invalid configuration.
6795 *
6796 * The same restriction applies when Secure Connections
6797 * has been enabled. For BR/EDR this is a controller feature
6798 * while for LE it is a host stack feature. This means that
6799 * switching BR/EDR back on when Secure Connections has been
6800 * enabled is not a supported transaction.
6801 */
6802 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6803 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6804 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6806 MGMT_STATUS_REJECTED);
6807 goto unlock;
6808 }
6809 }
6810
6811 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6812 if (!cmd)
6813 err = -ENOMEM;
6814 else
6815 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6816 set_bredr_complete);
6817
6818 if (err < 0) {
6819 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6820 MGMT_STATUS_FAILED);
6821 if (cmd)
6822 mgmt_pending_free(cmd);
6823
6824 goto unlock;
6825 }
6826
6827 /* We need to flip the bit already here so that
6828 * hci_req_update_adv_data generates the correct flags.
6829 */
6830 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6831
6832 unlock:
6833 hci_dev_unlock(hdev);
6834 return err;
6835 }
6836
6837 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6838 {
6839 struct mgmt_pending_cmd *cmd = data;
6840 struct mgmt_mode *cp;
6841
6842 bt_dev_dbg(hdev, "err %d", err);
6843
6844 if (err) {
6845 u8 mgmt_err = mgmt_status(err);
6846
6847 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6848 goto done;
6849 }
6850
6851 cp = cmd->param;
6852
6853 switch (cp->val) {
6854 case 0x00:
6855 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6856 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6857 break;
6858 case 0x01:
6859 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 break;
6862 case 0x02:
6863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6865 break;
6866 }
6867
6868 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6869 new_settings(hdev, cmd->sk);
6870
6871 done:
6872 mgmt_pending_free(cmd);
6873 }
6874
6875 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6876 {
6877 struct mgmt_pending_cmd *cmd = data;
6878 struct mgmt_mode *cp = cmd->param;
6879 u8 val = !!cp->val;
6880
6881 /* Force write of val */
6882 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6883
6884 return hci_write_sc_support_sync(hdev, val);
6885 }
6886
6887 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6888 void *data, u16 len)
6889 {
6890 struct mgmt_mode *cp = data;
6891 struct mgmt_pending_cmd *cmd;
6892 u8 val;
6893 int err;
6894
6895 bt_dev_dbg(hdev, "sock %p", sk);
6896
6897 if (!lmp_sc_capable(hdev) &&
6898 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6900 MGMT_STATUS_NOT_SUPPORTED);
6901
6902 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6903 lmp_sc_capable(hdev) &&
6904 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_REJECTED);
6907
6908 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910 MGMT_STATUS_INVALID_PARAMS);
6911
6912 hci_dev_lock(hdev);
6913
6914 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6916 bool changed;
6917
6918 if (cp->val) {
6919 changed = !hci_dev_test_and_set_flag(hdev,
6920 HCI_SC_ENABLED);
6921 if (cp->val == 0x02)
6922 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6923 else
6924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6925 } else {
6926 changed = hci_dev_test_and_clear_flag(hdev,
6927 HCI_SC_ENABLED);
6928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6929 }
6930
6931 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6932 if (err < 0)
6933 goto failed;
6934
6935 if (changed)
6936 err = new_settings(hdev, sk);
6937
6938 goto failed;
6939 }
6940
6941 val = !!cp->val;
6942
6943 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6944 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6945 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6946 goto failed;
6947 }
6948
6949 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6950 if (!cmd)
6951 err = -ENOMEM;
6952 else
6953 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6954 set_secure_conn_complete);
6955
6956 if (err < 0) {
6957 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6958 MGMT_STATUS_FAILED);
6959 if (cmd)
6960 mgmt_pending_free(cmd);
6961 }
6962
6963 failed:
6964 hci_dev_unlock(hdev);
6965 return err;
6966 }
6967
6968 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6970 {
6971 struct mgmt_mode *cp = data;
6972 bool changed, use_changed;
6973 int err;
6974
6975 bt_dev_dbg(hdev, "sock %p", sk);
6976
6977 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6979 MGMT_STATUS_INVALID_PARAMS);
6980
6981 hci_dev_lock(hdev);
6982
6983 if (cp->val)
6984 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6985 else
6986 changed = hci_dev_test_and_clear_flag(hdev,
6987 HCI_KEEP_DEBUG_KEYS);
6988
6989 if (cp->val == 0x02)
6990 use_changed = !hci_dev_test_and_set_flag(hdev,
6991 HCI_USE_DEBUG_KEYS);
6992 else
6993 use_changed = hci_dev_test_and_clear_flag(hdev,
6994 HCI_USE_DEBUG_KEYS);
6995
6996 if (hdev_is_powered(hdev) && use_changed &&
6997 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6998 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6999 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7000 sizeof(mode), &mode);
7001 }
7002
7003 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7004 if (err < 0)
7005 goto unlock;
7006
7007 if (changed)
7008 err = new_settings(hdev, sk);
7009
7010 unlock:
7011 hci_dev_unlock(hdev);
7012 return err;
7013 }
7014
7015 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7016 u16 len)
7017 {
7018 struct mgmt_cp_set_privacy *cp = cp_data;
7019 bool changed;
7020 int err;
7021
7022 bt_dev_dbg(hdev, "sock %p", sk);
7023
7024 if (!lmp_le_capable(hdev))
7025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 MGMT_STATUS_NOT_SUPPORTED);
7027
7028 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_INVALID_PARAMS);
7031
7032 if (hdev_is_powered(hdev))
7033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7034 MGMT_STATUS_REJECTED);
7035
7036 hci_dev_lock(hdev);
7037
7038 /* If user space supports this command it is also expected to
7039 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7040 */
7041 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7042
7043 if (cp->privacy) {
7044 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7045 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7046 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7047 hci_adv_instances_set_rpa_expired(hdev, true);
7048 if (cp->privacy == 0x02)
7049 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7050 else
7051 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7052 } else {
7053 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7054 memset(hdev->irk, 0, sizeof(hdev->irk));
7055 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7056 hci_adv_instances_set_rpa_expired(hdev, false);
7057 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7058 }
7059
7060 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7061 if (err < 0)
7062 goto unlock;
7063
7064 if (changed)
7065 err = new_settings(hdev, sk);
7066
7067 unlock:
7068 hci_dev_unlock(hdev);
7069 return err;
7070 }
7071
7072 static bool irk_is_valid(struct mgmt_irk_info *irk)
7073 {
7074 switch (irk->addr.type) {
7075 case BDADDR_LE_PUBLIC:
7076 return true;
7077
7078 case BDADDR_LE_RANDOM:
7079 /* Two most significant bits shall be set */
7080 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7081 return false;
7082 return true;
7083 }
7084
7085 return false;
7086 }
7087
7088 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7089 u16 len)
7090 {
7091 struct mgmt_cp_load_irks *cp = cp_data;
7092 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7093 sizeof(struct mgmt_irk_info));
7094 u16 irk_count, expected_len;
7095 int i, err;
7096
7097 bt_dev_dbg(hdev, "sock %p", sk);
7098
7099 if (!lmp_le_capable(hdev))
7100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7101 MGMT_STATUS_NOT_SUPPORTED);
7102
7103 irk_count = __le16_to_cpu(cp->irk_count);
7104 if (irk_count > max_irk_count) {
7105 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7106 irk_count);
7107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7108 MGMT_STATUS_INVALID_PARAMS);
7109 }
7110
7111 expected_len = struct_size(cp, irks, irk_count);
7112 if (expected_len != len) {
7113 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7114 expected_len, len);
7115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7116 MGMT_STATUS_INVALID_PARAMS);
7117 }
7118
7119 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7120
7121 for (i = 0; i < irk_count; i++) {
7122 struct mgmt_irk_info *key = &cp->irks[i];
7123
7124 if (!irk_is_valid(key))
7125 return mgmt_cmd_status(sk, hdev->id,
7126 MGMT_OP_LOAD_IRKS,
7127 MGMT_STATUS_INVALID_PARAMS);
7128 }
7129
7130 hci_dev_lock(hdev);
7131
7132 hci_smp_irks_clear(hdev);
7133
7134 for (i = 0; i < irk_count; i++) {
7135 struct mgmt_irk_info *irk = &cp->irks[i];
7136
7137 if (hci_is_blocked_key(hdev,
7138 HCI_BLOCKED_KEY_TYPE_IRK,
7139 irk->val)) {
7140 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7141 &irk->addr.bdaddr);
7142 continue;
7143 }
7144
7145 hci_add_irk(hdev, &irk->addr.bdaddr,
7146 le_addr_type(irk->addr.type), irk->val,
7147 BDADDR_ANY);
7148 }
7149
7150 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7151
7152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7153
7154 hci_dev_unlock(hdev);
7155
7156 return err;
7157 }
7158
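/* An LTK is only acceptable for a public LE address or a static random
 * address (two most significant bits set), and the initiator field must
 * be a plain boolean.
 */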
7159 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7160 {
7161 if (key->initiator != 0x00 && key->initiator != 0x01)
7162 return false;
7163
7164 switch (key->addr.type) {
7165 case BDADDR_LE_PUBLIC:
7166 return true;
7167
7168 case BDADDR_LE_RANDOM:
7169 /* Two most significant bits shall be set */
7170 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7171 return false;
7172 return true;
7173 }
7174
7175 return false;
7176 }
7177
7178 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7179 void *cp_data, u16 len)
7180 {
7181 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7182 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7183 sizeof(struct mgmt_ltk_info));
7184 u16 key_count, expected_len;
7185 int i, err;
7186
7187 bt_dev_dbg(hdev, "sock %p", sk);
7188
7189 if (!lmp_le_capable(hdev))
7190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7191 MGMT_STATUS_NOT_SUPPORTED);
7192
7193 key_count = __le16_to_cpu(cp->key_count);
7194 if (key_count > max_key_count) {
7195 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7196 key_count);
7197 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7198 MGMT_STATUS_INVALID_PARAMS);
7199 }
7200
7201 expected_len = struct_size(cp, keys, key_count);
7202 if (expected_len != len) {
7203 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7204 expected_len, len);
7205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7206 MGMT_STATUS_INVALID_PARAMS);
7207 }
7208
7209 bt_dev_dbg(hdev, "key_count %u", key_count);
7210
7211 hci_dev_lock(hdev);
7212
7213 hci_smp_ltks_clear(hdev);
7214
7215 for (i = 0; i < key_count; i++) {
7216 struct mgmt_ltk_info *key = &cp->keys[i];
7217 u8 type, authenticated;
7218
7219 if (hci_is_blocked_key(hdev,
7220 HCI_BLOCKED_KEY_TYPE_LTK,
7221 key->val)) {
7222 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7223 &key->addr.bdaddr);
7224 continue;
7225 }
7226
7227 if (!ltk_is_valid(key)) {
7228 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7229 &key->addr.bdaddr);
7230 continue;
7231 }
7232
7233 switch (key->type) {
7234 case MGMT_LTK_UNAUTHENTICATED:
7235 authenticated = 0x00;
7236 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7237 break;
7238 case MGMT_LTK_AUTHENTICATED:
7239 authenticated = 0x01;
7240 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7241 break;
7242 case MGMT_LTK_P256_UNAUTH:
7243 authenticated = 0x00;
7244 type = SMP_LTK_P256;
7245 break;
7246 case MGMT_LTK_P256_AUTH:
7247 authenticated = 0x01;
7248 type = SMP_LTK_P256;
7249 break;
7250 case MGMT_LTK_P256_DEBUG:
7251 authenticated = 0x00;
7252 type = SMP_LTK_P256_DEBUG;
7253 fallthrough;
7254 default:
7255 continue;
7256 }
7257
7258 hci_add_ltk(hdev, &key->addr.bdaddr,
7259 le_addr_type(key->addr.type), type, authenticated,
7260 key->val, key->enc_size, key->ediv, key->rand);
7261 }
7262
7263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7264 NULL, 0);
7265
7266 hci_dev_unlock(hdev);
7267
7268 return err;
7269 }
7270
7271 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7272 {
7273 struct mgmt_pending_cmd *cmd = data;
7274 struct hci_conn *conn = cmd->user_data;
7275 struct mgmt_cp_get_conn_info *cp = cmd->param;
7276 struct mgmt_rp_get_conn_info rp;
7277 u8 status;
7278
7279 bt_dev_dbg(hdev, "err %d", err);
7280
7281 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7282
7283 status = mgmt_status(err);
7284 if (status == MGMT_STATUS_SUCCESS) {
7285 rp.rssi = conn->rssi;
7286 rp.tx_power = conn->tx_power;
7287 rp.max_tx_power = conn->max_tx_power;
7288 } else {
7289 rp.rssi = HCI_RSSI_INVALID;
7290 rp.tx_power = HCI_TX_POWER_INVALID;
7291 rp.max_tx_power = HCI_TX_POWER_INVALID;
7292 }
7293
7294 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7295 &rp, sizeof(rp));
7296
7297 mgmt_pending_free(cmd);
7298 }
7299
7300 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7301 {
7302 struct mgmt_pending_cmd *cmd = data;
7303 struct mgmt_cp_get_conn_info *cp = cmd->param;
7304 struct hci_conn *conn;
7305 int err;
7306 __le16 handle;
7307
7308 /* Make sure we are still connected */
7309 if (cp->addr.type == BDADDR_BREDR)
7310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7311 &cp->addr.bdaddr);
7312 else
7313 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7314
7315 if (!conn || conn->state != BT_CONNECTED)
7316 return MGMT_STATUS_NOT_CONNECTED;
7317
7318 cmd->user_data = conn;
7319 handle = cpu_to_le16(conn->handle);
7320
7321 /* Refresh RSSI each time */
7322 err = hci_read_rssi_sync(hdev, handle);
7323
7324 /* For LE links the TX power does not change, so we don't need to
7325 * query for it once the value is known.
7326 */
7327 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7328 conn->tx_power == HCI_TX_POWER_INVALID))
7329 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7330
7331 /* Max TX power needs to be read only once per connection */
7332 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7333 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7334
7335 return err;
7336 }
7337
7338 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7339 u16 len)
7340 {
7341 struct mgmt_cp_get_conn_info *cp = data;
7342 struct mgmt_rp_get_conn_info rp;
7343 struct hci_conn *conn;
7344 unsigned long conn_info_age;
7345 int err = 0;
7346
7347 bt_dev_dbg(hdev, "sock %p", sk);
7348
7349 memset(&rp, 0, sizeof(rp));
7350 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7351 rp.addr.type = cp->addr.type;
7352
7353 if (!bdaddr_type_is_valid(cp->addr.type))
7354 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7355 MGMT_STATUS_INVALID_PARAMS,
7356 &rp, sizeof(rp));
7357
7358 hci_dev_lock(hdev);
7359
7360 if (!hdev_is_powered(hdev)) {
7361 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7362 MGMT_STATUS_NOT_POWERED, &rp,
7363 sizeof(rp));
7364 goto unlock;
7365 }
7366
7367 if (cp->addr.type == BDADDR_BREDR)
7368 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7369 &cp->addr.bdaddr);
7370 else
7371 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7372
7373 if (!conn || conn->state != BT_CONNECTED) {
7374 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7375 MGMT_STATUS_NOT_CONNECTED, &rp,
7376 sizeof(rp));
7377 goto unlock;
7378 }
7379
7380 /* To avoid the client trying to guess when to poll again, we calculate
7381 * the conn info age as a random value between the min/max set in hdev.
7382 */
7383 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7384 hdev->conn_info_max_age - 1);
7385
7386 /* Query controller to refresh cached values if they are too old or were
7387 * never read.
7388 */
7389 if (time_after(jiffies, conn->conn_info_timestamp +
7390 msecs_to_jiffies(conn_info_age)) ||
7391 !conn->conn_info_timestamp) {
7392 struct mgmt_pending_cmd *cmd;
7393
7394 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7395 len);
7396 if (!cmd) {
7397 err = -ENOMEM;
7398 } else {
7399 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7400 cmd, get_conn_info_complete);
7401 }
7402
7403 if (err < 0) {
7404 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7405 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7406
7407 if (cmd)
7408 mgmt_pending_free(cmd);
7409
7410 goto unlock;
7411 }
7412
7413 conn->conn_info_timestamp = jiffies;
7414 } else {
7415 /* Cache is valid, just reply with values cached in hci_conn */
7416 rp.rssi = conn->rssi;
7417 rp.tx_power = conn->tx_power;
7418 rp.max_tx_power = conn->max_tx_power;
7419
7420 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7421 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7422 }
7423
7424 unlock:
7425 hci_dev_unlock(hdev);
7426 return err;
7427 }
7428
7429 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7430 {
7431 struct mgmt_pending_cmd *cmd = data;
7432 struct mgmt_cp_get_clock_info *cp = cmd->param;
7433 struct mgmt_rp_get_clock_info rp;
7434 struct hci_conn *conn = cmd->user_data;
7435 u8 status = mgmt_status(err);
7436
7437 bt_dev_dbg(hdev, "err %d", err);
7438
7439 memset(&rp, 0, sizeof(rp));
7440 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7441 rp.addr.type = cp->addr.type;
7442
7443 if (err)
7444 goto complete;
7445
7446 rp.local_clock = cpu_to_le32(hdev->clock);
7447
7448 if (conn) {
7449 rp.piconet_clock = cpu_to_le32(conn->clock);
7450 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7451 }
7452
7453 complete:
7454 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7455 sizeof(rp));
7456
7457 mgmt_pending_free(cmd);
7458 }
7459
7460 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7461 {
7462 struct mgmt_pending_cmd *cmd = data;
7463 struct mgmt_cp_get_clock_info *cp = cmd->param;
7464 struct hci_cp_read_clock hci_cp;
7465 struct hci_conn *conn;
7466
7467 memset(&hci_cp, 0, sizeof(hci_cp));
7468 hci_read_clock_sync(hdev, &hci_cp);
7469
7470 /* Make sure connection still exists */
7471 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7472 if (!conn || conn->state != BT_CONNECTED)
7473 return MGMT_STATUS_NOT_CONNECTED;
7474
7475 cmd->user_data = conn;
7476 hci_cp.handle = cpu_to_le16(conn->handle);
7477 hci_cp.which = 0x01; /* Piconet clock */
7478
7479 return hci_read_clock_sync(hdev, &hci_cp);
7480 }
7481
7482 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7483 u16 len)
7484 {
7485 struct mgmt_cp_get_clock_info *cp = data;
7486 struct mgmt_rp_get_clock_info rp;
7487 struct mgmt_pending_cmd *cmd;
7488 struct hci_conn *conn;
7489 int err;
7490
7491 bt_dev_dbg(hdev, "sock %p", sk);
7492
7493 memset(&rp, 0, sizeof(rp));
7494 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7495 rp.addr.type = cp->addr.type;
7496
7497 if (cp->addr.type != BDADDR_BREDR)
7498 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7499 MGMT_STATUS_INVALID_PARAMS,
7500 &rp, sizeof(rp));
7501
7502 hci_dev_lock(hdev);
7503
7504 if (!hdev_is_powered(hdev)) {
7505 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7506 MGMT_STATUS_NOT_POWERED, &rp,
7507 sizeof(rp));
7508 goto unlock;
7509 }
7510
7511 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7512 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7513 &cp->addr.bdaddr);
7514 if (!conn || conn->state != BT_CONNECTED) {
7515 err = mgmt_cmd_complete(sk, hdev->id,
7516 MGMT_OP_GET_CLOCK_INFO,
7517 MGMT_STATUS_NOT_CONNECTED,
7518 &rp, sizeof(rp));
7519 goto unlock;
7520 }
7521 } else {
7522 conn = NULL;
7523 }
7524
7525 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7526 if (!cmd)
7527 err = -ENOMEM;
7528 else
7529 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7530 get_clock_info_complete);
7531
7532 if (err < 0) {
7533 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7534 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7535
7536 if (cmd)
7537 mgmt_pending_free(cmd);
7538 }
7539
7541 unlock:
7542 hci_dev_unlock(hdev);
7543 return err;
7544 }
7545
7546 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7547 {
7548 struct hci_conn *conn;
7549
7550 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7551 if (!conn)
7552 return false;
7553
7554 if (conn->dst_type != type)
7555 return false;
7556
7557 if (conn->state != BT_CONNECTED)
7558 return false;
7559
7560 return true;
7561 }
7562
7563 /* This function requires the caller holds hdev->lock */
7564 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7565 u8 addr_type, u8 auto_connect)
7566 {
7567 struct hci_conn_params *params;
7568
7569 params = hci_conn_params_add(hdev, addr, addr_type);
7570 if (!params)
7571 return -EIO;
7572
7573 if (params->auto_connect == auto_connect)
7574 return 0;
7575
7576 hci_pend_le_list_del_init(params);
7577
7578 switch (auto_connect) {
7579 case HCI_AUTO_CONN_DISABLED:
7580 case HCI_AUTO_CONN_LINK_LOSS:
7581 /* If auto connect is being disabled while we're trying to
7582 * connect to a device, keep connecting.
7583 */
7584 if (params->explicit_connect)
7585 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7586 break;
7587 case HCI_AUTO_CONN_REPORT:
7588 if (params->explicit_connect)
7589 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7590 else
7591 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7592 break;
7593 case HCI_AUTO_CONN_DIRECT:
7594 case HCI_AUTO_CONN_ALWAYS:
7595 if (!is_connected(hdev, addr, addr_type))
7596 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7597 break;
7598 }
7599
7600 params->auto_connect = auto_connect;
7601
7602 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7603 addr, addr_type, auto_connect);
7604
7605 return 0;
7606 }
7607
7608 static void device_added(struct sock *sk, struct hci_dev *hdev,
7609 bdaddr_t *bdaddr, u8 type, u8 action)
7610 {
7611 struct mgmt_ev_device_added ev;
7612
7613 bacpy(&ev.addr.bdaddr, bdaddr);
7614 ev.addr.type = type;
7615 ev.action = action;
7616
7617 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7618 }
7619
7620 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7621 {
7622 struct mgmt_pending_cmd *cmd = data;
7623 struct mgmt_cp_add_device *cp = cmd->param;
7624
7625 if (!err) {
7626 struct hci_conn_params *params;
7627
7628 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7629 le_addr_type(cp->addr.type));
7630
7631 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7632 cp->action);
7633 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7634 cp->addr.type, hdev->conn_flags,
7635 params ? params->flags : 0);
7636 }
7637
7638 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7639 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7640 mgmt_pending_free(cmd);
7641 }
7642
7643 static int add_device_sync(struct hci_dev *hdev, void *data)
7644 {
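/* The new entry was already recorded under hdev->lock in add_device();
 * the only HCI work left for the cmd_sync queue is refreshing the
 * passive scan state (accept list and scan parameters).
 */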
7645 return hci_update_passive_scan_sync(hdev);
7646 }
7647
7648 static int add_device(struct sock *sk, struct hci_dev *hdev,
7649 void *data, u16 len)
7650 {
7651 struct mgmt_pending_cmd *cmd;
7652 struct mgmt_cp_add_device *cp = data;
7653 u8 auto_conn, addr_type;
7654 struct hci_conn_params *params;
7655 int err;
7656 u32 current_flags = 0;
7657 u32 supported_flags;
7658
7659 bt_dev_dbg(hdev, "sock %p", sk);
7660
7661 if (!bdaddr_type_is_valid(cp->addr.type) ||
7662 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7664 MGMT_STATUS_INVALID_PARAMS,
7665 &cp->addr, sizeof(cp->addr));
7666
7667 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7668 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7669 MGMT_STATUS_INVALID_PARAMS,
7670 &cp->addr, sizeof(cp->addr));
7671
7672 hci_dev_lock(hdev);
7673
7674 if (cp->addr.type == BDADDR_BREDR) {
7675 /* Only the "allow incoming connections" action (0x01) is supported for now */
7676 if (cp->action != 0x01) {
7677 err = mgmt_cmd_complete(sk, hdev->id,
7678 MGMT_OP_ADD_DEVICE,
7679 MGMT_STATUS_INVALID_PARAMS,
7680 &cp->addr, sizeof(cp->addr));
7681 goto unlock;
7682 }
7683
7684 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7685 &cp->addr.bdaddr,
7686 cp->addr.type, 0);
7687 if (err)
7688 goto unlock;
7689
7690 hci_update_scan(hdev);
7691
7692 goto added;
7693 }
7694
7695 addr_type = le_addr_type(cp->addr.type);
7696
7697 if (cp->action == 0x02)
7698 auto_conn = HCI_AUTO_CONN_ALWAYS;
7699 else if (cp->action == 0x01)
7700 auto_conn = HCI_AUTO_CONN_DIRECT;
7701 else
7702 auto_conn = HCI_AUTO_CONN_REPORT;
7703
7704 /* Kernel internally uses conn_params with resolvable private
7705 * address, but Add Device allows only identity addresses.
7706 * Make sure it is enforced before calling
7707 * hci_conn_params_lookup.
7708 */
7709 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7710 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7711 MGMT_STATUS_INVALID_PARAMS,
7712 &cp->addr, sizeof(cp->addr));
7713 goto unlock;
7714 }
7715
7716 /* If the connection parameters don't exist for this device,
7717 * they will be created and configured with defaults.
7718 */
7719 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7720 auto_conn) < 0) {
7721 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7722 MGMT_STATUS_FAILED, &cp->addr,
7723 sizeof(cp->addr));
7724 goto unlock;
7725 } else {
7726 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7727 addr_type);
7728 if (params)
7729 current_flags = params->flags;
7730 }
7731
7732 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7733 if (!cmd) {
7734 err = -ENOMEM;
7735 goto unlock;
7736 }
7737
7738 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7739 add_device_complete);
7740 if (err < 0) {
7741 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7742 MGMT_STATUS_FAILED, &cp->addr,
7743 sizeof(cp->addr));
7744 mgmt_pending_free(cmd);
7745 }
7746
7747 goto unlock;
7748
7749 added:
7750 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7751 supported_flags = hdev->conn_flags;
7752 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7753 supported_flags, current_flags);
7754
7755 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7756 MGMT_STATUS_SUCCESS, &cp->addr,
7757 sizeof(cp->addr));
7758
7759 unlock:
7760 hci_dev_unlock(hdev);
7761 return err;
7762 }
7763
7764 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7765 bdaddr_t *bdaddr, u8 type)
7766 {
7767 struct mgmt_ev_device_removed ev;
7768
7769 bacpy(&ev.addr.bdaddr, bdaddr);
7770 ev.addr.type = type;
7771
7772 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7773 }
7774
7775 static int remove_device_sync(struct hci_dev *hdev, void *data)
7776 {
7777 return hci_update_passive_scan_sync(hdev);
7778 }
7779
7780 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7781 void *data, u16 len)
7782 {
7783 struct mgmt_cp_remove_device *cp = data;
7784 int err;
7785
7786 bt_dev_dbg(hdev, "sock %p", sk);
7787
7788 hci_dev_lock(hdev);
7789
7790 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7791 struct hci_conn_params *params;
7792 u8 addr_type;
7793
7794 if (!bdaddr_type_is_valid(cp->addr.type)) {
7795 err = mgmt_cmd_complete(sk, hdev->id,
7796 MGMT_OP_REMOVE_DEVICE,
7797 MGMT_STATUS_INVALID_PARAMS,
7798 &cp->addr, sizeof(cp->addr));
7799 goto unlock;
7800 }
7801
7802 if (cp->addr.type == BDADDR_BREDR) {
7803 err = hci_bdaddr_list_del(&hdev->accept_list,
7804 &cp->addr.bdaddr,
7805 cp->addr.type);
7806 if (err) {
7807 err = mgmt_cmd_complete(sk, hdev->id,
7808 MGMT_OP_REMOVE_DEVICE,
7809 MGMT_STATUS_INVALID_PARAMS,
7810 &cp->addr,
7811 sizeof(cp->addr));
7812 goto unlock;
7813 }
7814
7815 hci_update_scan(hdev);
7816
7817 device_removed(sk, hdev, &cp->addr.bdaddr,
7818 cp->addr.type);
7819 goto complete;
7820 }
7821
7822 addr_type = le_addr_type(cp->addr.type);
7823
7824 /* Kernel internally uses conn_params with resolvable private
7825 * address, but Remove Device allows only identity addresses.
7826 * Make sure it is enforced before calling
7827 * hci_conn_params_lookup.
7828 */
7829 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7830 err = mgmt_cmd_complete(sk, hdev->id,
7831 MGMT_OP_REMOVE_DEVICE,
7832 MGMT_STATUS_INVALID_PARAMS,
7833 &cp->addr, sizeof(cp->addr));
7834 goto unlock;
7835 }
7836
7837 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7838 addr_type);
7839 if (!params) {
7840 err = mgmt_cmd_complete(sk, hdev->id,
7841 MGMT_OP_REMOVE_DEVICE,
7842 MGMT_STATUS_INVALID_PARAMS,
7843 &cp->addr, sizeof(cp->addr));
7844 goto unlock;
7845 }
7846
7847 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7848 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7849 err = mgmt_cmd_complete(sk, hdev->id,
7850 MGMT_OP_REMOVE_DEVICE,
7851 MGMT_STATUS_INVALID_PARAMS,
7852 &cp->addr, sizeof(cp->addr));
7853 goto unlock;
7854 }
7855
7856 hci_conn_params_free(params);
7857
7858 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7859 } else {
7860 struct hci_conn_params *p, *tmp;
7861 struct bdaddr_list *b, *btmp;
7862
7863 if (cp->addr.type) {
7864 err = mgmt_cmd_complete(sk, hdev->id,
7865 MGMT_OP_REMOVE_DEVICE,
7866 MGMT_STATUS_INVALID_PARAMS,
7867 &cp->addr, sizeof(cp->addr));
7868 goto unlock;
7869 }
7870
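/* BDADDR_ANY with address type 0 means "remove everything": flush the
 * BR/EDR accept list first, then every LE connection parameter that is
 * not needed for an explicit connect.
 */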
7871 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7872 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7873 list_del(&b->list);
7874 kfree(b);
7875 }
7876
7877 hci_update_scan(hdev);
7878
7879 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7880 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7881 continue;
7882 device_removed(sk, hdev, &p->addr, p->addr_type);
7883 if (p->explicit_connect) {
7884 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7885 continue;
7886 }
7887 hci_conn_params_free(p);
7888 }
7889
7890 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7891 }
7892
7893 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7894
7895 complete:
7896 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7897 MGMT_STATUS_SUCCESS, &cp->addr,
7898 sizeof(cp->addr));
7899 unlock:
7900 hci_dev_unlock(hdev);
7901 return err;
7902 }
7903
7904 static int conn_update_sync(struct hci_dev *hdev, void *data)
7905 {
7906 struct hci_conn_params *params = data;
7907 struct hci_conn *conn;
7908
7909 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7910 if (!conn)
7911 return -ECANCELED;
7912
7913 return hci_le_conn_update_sync(hdev, conn, params);
7914 }
7915
7916 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7917 u16 len)
7918 {
7919 struct mgmt_cp_load_conn_param *cp = data;
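/* Largest param_count that can be expressed once the fixed header has
 * been subtracted from a total message length that must fit in a u16.
 */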
7920 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7921 sizeof(struct mgmt_conn_param));
7922 u16 param_count, expected_len;
7923 int i;
7924
7925 if (!lmp_le_capable(hdev))
7926 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7927 MGMT_STATUS_NOT_SUPPORTED);
7928
7929 param_count = __le16_to_cpu(cp->param_count);
7930 if (param_count > max_param_count) {
7931 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7932 param_count);
7933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7934 MGMT_STATUS_INVALID_PARAMS);
7935 }
7936
7937 expected_len = struct_size(cp, params, param_count);
7938 if (expected_len != len) {
7939 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7940 expected_len, len);
7941 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7942 MGMT_STATUS_INVALID_PARAMS);
7943 }
7944
7945 bt_dev_dbg(hdev, "param_count %u", param_count);
7946
7947 hci_dev_lock(hdev);
7948
7949 if (param_count > 1)
7950 hci_conn_params_clear_disabled(hdev);
7951
7952 for (i = 0; i < param_count; i++) {
7953 struct mgmt_conn_param *param = &cp->params[i];
7954 struct hci_conn_params *hci_param;
7955 u16 min, max, latency, timeout;
7956 bool update = false;
7957 u8 addr_type;
7958
7959 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7960 param->addr.type);
7961
7962 if (param->addr.type == BDADDR_LE_PUBLIC) {
7963 addr_type = ADDR_LE_DEV_PUBLIC;
7964 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7965 addr_type = ADDR_LE_DEV_RANDOM;
7966 } else {
7967 bt_dev_err(hdev, "ignoring invalid connection parameters");
7968 continue;
7969 }
7970
7971 min = le16_to_cpu(param->min_interval);
7972 max = le16_to_cpu(param->max_interval);
7973 latency = le16_to_cpu(param->latency);
7974 timeout = le16_to_cpu(param->timeout);
7975
7976 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7977 min, max, latency, timeout);
7978
7979 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7980 bt_dev_err(hdev, "ignoring invalid connection parameters");
7981 continue;
7982 }
7983
7984 /* Detect when the loading is for an existing parameter then
7985 * attempt to trigger the connection update procedure.
7986 */
7987 if (!i && param_count == 1) {
7988 hci_param = hci_conn_params_lookup(hdev,
7989 &param->addr.bdaddr,
7990 addr_type);
7991 if (hci_param)
7992 update = true;
7993 else
7994 hci_conn_params_clear_disabled(hdev);
7995 }
7996
7997 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7998 addr_type);
7999 if (!hci_param) {
8000 bt_dev_err(hdev, "failed to add connection parameters");
8001 continue;
8002 }
8003
8004 hci_param->conn_min_interval = min;
8005 hci_param->conn_max_interval = max;
8006 hci_param->conn_latency = latency;
8007 hci_param->supervision_timeout = timeout;
8008
8009 /* Check if we need to trigger a connection update */
8010 if (update) {
8011 struct hci_conn *conn;
8012
8013 /* Look up an existing connection where we are central and,
8014 * if its parameters don't match, trigger a connection
8015 * update.
8016 */
8017 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8018 addr_type);
8019 if (conn && conn->role == HCI_ROLE_MASTER &&
8020 (conn->le_conn_min_interval != min ||
8021 conn->le_conn_max_interval != max ||
8022 conn->le_conn_latency != latency ||
8023 conn->le_supv_timeout != timeout))
8024 hci_cmd_sync_queue(hdev, conn_update_sync,
8025 hci_param, NULL);
8026 }
8027 }
8028
8029 hci_dev_unlock(hdev);
8030
8031 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8032 NULL, 0);
8033 }
8034
8035 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8036 void *data, u16 len)
8037 {
8038 struct mgmt_cp_set_external_config *cp = data;
8039 bool changed;
8040 int err;
8041
8042 bt_dev_dbg(hdev, "sock %p", sk);
8043
8044 if (hdev_is_powered(hdev))
8045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8046 MGMT_STATUS_REJECTED);
8047
8048 if (cp->config != 0x00 && cp->config != 0x01)
8049 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8050 MGMT_STATUS_INVALID_PARAMS);
8051
8052 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8053 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8054 MGMT_STATUS_NOT_SUPPORTED);
8055
8056 hci_dev_lock(hdev);
8057
8058 if (cp->config)
8059 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8060 else
8061 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8062
8063 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8064 if (err < 0)
8065 goto unlock;
8066
8067 if (!changed)
8068 goto unlock;
8069
8070 err = new_options(hdev, sk);
8071
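/* Toggling the option may move the controller between the configured
 * and unconfigured index lists, so re-announce the index and, when the
 * device has just become configured, schedule the deferred power-on.
 */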
8072 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8073 mgmt_index_removed(hdev);
8074
8075 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8076 hci_dev_set_flag(hdev, HCI_CONFIG);
8077 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8078
8079 queue_work(hdev->req_workqueue, &hdev->power_on);
8080 } else {
8081 set_bit(HCI_RAW, &hdev->flags);
8082 mgmt_index_added(hdev);
8083 }
8084 }
8085
8086 unlock:
8087 hci_dev_unlock(hdev);
8088 return err;
8089 }
8090
8091 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8092 void *data, u16 len)
8093 {
8094 struct mgmt_cp_set_public_address *cp = data;
8095 bool changed;
8096 int err;
8097
8098 bt_dev_dbg(hdev, "sock %p", sk);
8099
8100 if (hdev_is_powered(hdev))
8101 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8102 MGMT_STATUS_REJECTED);
8103
8104 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8106 MGMT_STATUS_INVALID_PARAMS);
8107
8108 if (!hdev->set_bdaddr)
8109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8110 MGMT_STATUS_NOT_SUPPORTED);
8111
8112 hci_dev_lock(hdev);
8113
8114 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8115 bacpy(&hdev->public_addr, &cp->bdaddr);
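/* This only records the address; the driver's set_bdaddr() callback is
 * expected to apply it during the next power-on sequence.
 */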
8116
8117 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8118 if (err < 0)
8119 goto unlock;
8120
8121 if (!changed)
8122 goto unlock;
8123
8124 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8125 err = new_options(hdev, sk);
8126
8127 if (is_configured(hdev)) {
8128 mgmt_index_removed(hdev);
8129
8130 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8131
8132 hci_dev_set_flag(hdev, HCI_CONFIG);
8133 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8134
8135 queue_work(hdev->req_workqueue, &hdev->power_on);
8136 }
8137
8138 unlock:
8139 hci_dev_unlock(hdev);
8140 return err;
8141 }
8142
8143 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8144 int err)
8145 {
8146 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8147 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8148 u8 *h192, *r192, *h256, *r256;
8149 struct mgmt_pending_cmd *cmd = data;
8150 struct sk_buff *skb = cmd->skb;
8151 u8 status = mgmt_status(err);
8152 u16 eir_len;
8153
8154 if (err == -ECANCELED ||
8155 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8156 return;
8157
8158 if (!status) {
8159 if (!skb)
8160 status = MGMT_STATUS_FAILED;
8161 else if (IS_ERR(skb))
8162 status = mgmt_status(PTR_ERR(skb));
8163 else
8164 status = mgmt_status(skb->data[0]);
8165 }
8166
8167 bt_dev_dbg(hdev, "status %u", status);
8168
8169 mgmt_cp = cmd->param;
8170
8171 if (status) {
8172 status = mgmt_status(status);
8173 eir_len = 0;
8174
8175 h192 = NULL;
8176 r192 = NULL;
8177 h256 = NULL;
8178 r256 = NULL;
8179 } else if (!bredr_sc_enabled(hdev)) {
8180 struct hci_rp_read_local_oob_data *rp;
8181
8182 if (skb->len != sizeof(*rp)) {
8183 status = MGMT_STATUS_FAILED;
8184 eir_len = 0;
8185 } else {
8186 status = MGMT_STATUS_SUCCESS;
8187 rp = (void *)skb->data;
8188
8189 eir_len = 5 + 18 + 18;
8190 h192 = rp->hash;
8191 r192 = rp->rand;
8192 h256 = NULL;
8193 r256 = NULL;
8194 }
8195 } else {
8196 struct hci_rp_read_local_oob_ext_data *rp;
8197
8198 if (skb->len != sizeof(*rp)) {
8199 status = MGMT_STATUS_FAILED;
8200 eir_len = 0;
8201 } else {
8202 status = MGMT_STATUS_SUCCESS;
8203 rp = (void *)skb->data;
8204
8205 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8206 eir_len = 5 + 18 + 18;
8207 h192 = NULL;
8208 r192 = NULL;
8209 } else {
8210 eir_len = 5 + 18 + 18 + 18 + 18;
8211 h192 = rp->hash192;
8212 r192 = rp->rand192;
8213 }
8214
8215 h256 = rp->hash256;
8216 r256 = rp->rand256;
8217 }
8218 }
8219
8220 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8221 if (!mgmt_rp)
8222 goto done;
8223
8224 if (eir_len == 0)
8225 goto send_rsp;
8226
8227 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8228 hdev->dev_class, 3);
8229
8230 if (h192 && r192) {
8231 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8232 EIR_SSP_HASH_C192, h192, 16);
8233 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8234 EIR_SSP_RAND_R192, r192, 16);
8235 }
8236
8237 if (h256 && r256) {
8238 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8239 EIR_SSP_HASH_C256, h256, 16);
8240 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8241 EIR_SSP_RAND_R256, r256, 16);
8242 }
8243
8244 send_rsp:
8245 mgmt_rp->type = mgmt_cp->type;
8246 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8247
8248 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8249 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8250 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8251 if (err < 0 || status)
8252 goto done;
8253
8254 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8255
8256 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8257 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8258 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8259 done:
8260 if (skb && !IS_ERR(skb))
8261 kfree_skb(skb);
8262
8263 kfree(mgmt_rp);
8264 mgmt_pending_remove(cmd);
8265 }
8266
8267 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8268 struct mgmt_cp_read_local_oob_ext_data *cp)
8269 {
8270 struct mgmt_pending_cmd *cmd;
8271 int err;
8272
8273 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8274 cp, sizeof(*cp));
8275 if (!cmd)
8276 return -ENOMEM;
8277
8278 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8279 read_local_oob_ext_data_complete);
8280
8281 if (err < 0) {
8282 mgmt_pending_remove(cmd);
8283 return err;
8284 }
8285
8286 return 0;
8287 }
8288
8289 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8290 void *data, u16 data_len)
8291 {
8292 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8293 struct mgmt_rp_read_local_oob_ext_data *rp;
8294 size_t rp_len;
8295 u16 eir_len;
8296 u8 status, flags, role, addr[7], hash[16], rand[16];
8297 int err;
8298
8299 bt_dev_dbg(hdev, "sock %p", sk);
8300
8301 if (hdev_is_powered(hdev)) {
8302 switch (cp->type) {
8303 case BIT(BDADDR_BREDR):
8304 status = mgmt_bredr_support(hdev);
8305 if (status)
8306 eir_len = 0;
8307 else
8308 eir_len = 5;
8309 break;
8310 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8311 status = mgmt_le_support(hdev);
8312 if (status)
8313 eir_len = 0;
8314 else
8315 eir_len = 9 + 3 + 18 + 18 + 3;
8316 break;
8317 default:
8318 status = MGMT_STATUS_INVALID_PARAMS;
8319 eir_len = 0;
8320 break;
8321 }
8322 } else {
8323 status = MGMT_STATUS_NOT_POWERED;
8324 eir_len = 0;
8325 }
8326
8327 rp_len = sizeof(*rp) + eir_len;
8328 rp = kmalloc(rp_len, GFP_ATOMIC);
8329 if (!rp)
8330 return -ENOMEM;
8331
8332 if (!status && !lmp_ssp_capable(hdev)) {
8333 status = MGMT_STATUS_NOT_SUPPORTED;
8334 eir_len = 0;
8335 }
8336
8337 if (status)
8338 goto complete;
8339
8340 hci_dev_lock(hdev);
8341
8342 eir_len = 0;
8343 switch (cp->type) {
8344 case BIT(BDADDR_BREDR):
8345 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8346 err = read_local_ssp_oob_req(hdev, sk, cp);
8347 hci_dev_unlock(hdev);
8348 if (!err)
8349 goto done;
8350
8351 status = MGMT_STATUS_FAILED;
8352 goto complete;
8353 } else {
8354 eir_len = eir_append_data(rp->eir, eir_len,
8355 EIR_CLASS_OF_DEV,
8356 hdev->dev_class, 3);
8357 }
8358 break;
8359 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8360 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8361 smp_generate_oob(hdev, hash, rand) < 0) {
8362 hci_dev_unlock(hdev);
8363 status = MGMT_STATUS_FAILED;
8364 goto complete;
8365 }
8366
8367 /* This should return the active RPA, but since the RPA
8368 * is only programmed on demand, it is really hard to fill
8369 * this in at the moment. For now disallow retrieving
8370 * local out-of-band data when privacy is in use.
8371 *
8372 * Returning the identity address will not help here since
8373 * pairing happens before the identity resolving key is
8374 * known and thus the connection establishment happens
8375 * based on the RPA and not the identity address.
8376 */
8377 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8378 hci_dev_unlock(hdev);
8379 status = MGMT_STATUS_REJECTED;
8380 goto complete;
8381 }
8382
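/* The LE Bluetooth Device Address field is the 6 byte address plus one
 * type byte: 0x00 for public, 0x01 for random (here the static address).
 */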
8383 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8384 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8385 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8386 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8387 memcpy(addr, &hdev->static_addr, 6);
8388 addr[6] = 0x01;
8389 } else {
8390 memcpy(addr, &hdev->bdaddr, 6);
8391 addr[6] = 0x00;
8392 }
8393
8394 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8395 addr, sizeof(addr));
8396
8397 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8398 role = 0x02;
8399 else
8400 role = 0x01;
8401
8402 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8403 &role, sizeof(role));
8404
8405 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8406 eir_len = eir_append_data(rp->eir, eir_len,
8407 EIR_LE_SC_CONFIRM,
8408 hash, sizeof(hash));
8409
8410 eir_len = eir_append_data(rp->eir, eir_len,
8411 EIR_LE_SC_RANDOM,
8412 rand, sizeof(rand));
8413 }
8414
8415 flags = mgmt_get_adv_discov_flags(hdev);
8416
8417 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8418 flags |= LE_AD_NO_BREDR;
8419
8420 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8421 &flags, sizeof(flags));
8422 break;
8423 }
8424
8425 hci_dev_unlock(hdev);
8426
8427 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8428
8429 status = MGMT_STATUS_SUCCESS;
8430
8431 complete:
8432 rp->type = cp->type;
8433 rp->eir_len = cpu_to_le16(eir_len);
8434
8435 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8436 status, rp, sizeof(*rp) + eir_len);
8437 if (err < 0 || status)
8438 goto done;
8439
8440 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8441 rp, sizeof(*rp) + eir_len,
8442 HCI_MGMT_OOB_DATA_EVENTS, sk);
8443
8444 done:
8445 kfree(rp);
8446
8447 return err;
8448 }
8449
8450 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8451 {
8452 u32 flags = 0;
8453
8454 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8455 flags |= MGMT_ADV_FLAG_DISCOV;
8456 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8457 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8458 flags |= MGMT_ADV_FLAG_APPEARANCE;
8459 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8460 flags |= MGMT_ADV_PARAM_DURATION;
8461 flags |= MGMT_ADV_PARAM_TIMEOUT;
8462 flags |= MGMT_ADV_PARAM_INTERVALS;
8463 flags |= MGMT_ADV_PARAM_TX_POWER;
8464 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8465
8466 /* With extended advertising, the TX_POWER returned from
8467 * Set Adv Param is always valid.
8468 */
8469 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8470 flags |= MGMT_ADV_FLAG_TX_POWER;
8471
8472 if (ext_adv_capable(hdev)) {
8473 flags |= MGMT_ADV_FLAG_SEC_1M;
8474 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8475 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8476
8477 if (le_2m_capable(hdev))
8478 flags |= MGMT_ADV_FLAG_SEC_2M;
8479
8480 if (le_coded_capable(hdev))
8481 flags |= MGMT_ADV_FLAG_SEC_CODED;
8482 }
8483
8484 return flags;
8485 }
8486
8487 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8488 void *data, u16 data_len)
8489 {
8490 struct mgmt_rp_read_adv_features *rp;
8491 size_t rp_len;
8492 int err;
8493 struct adv_info *adv_instance;
8494 u32 supported_flags;
8495 u8 *instance;
8496
8497 bt_dev_dbg(hdev, "sock %p", sk);
8498
8499 if (!lmp_le_capable(hdev))
8500 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8501 MGMT_STATUS_REJECTED);
8502
8503 hci_dev_lock(hdev);
8504
8505 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8506 rp = kmalloc(rp_len, GFP_ATOMIC);
8507 if (!rp) {
8508 hci_dev_unlock(hdev);
8509 return -ENOMEM;
8510 }
8511
8512 supported_flags = get_supported_adv_flags(hdev);
8513
8514 rp->supported_flags = cpu_to_le32(supported_flags);
8515 rp->max_adv_data_len = max_adv_len(hdev);
8516 rp->max_scan_rsp_len = max_adv_len(hdev);
8517 rp->max_instances = hdev->le_num_of_adv_sets;
8518 rp->num_instances = hdev->adv_instance_cnt;
8519
8520 instance = rp->instance;
8521 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8522 /* Only instances 1-le_num_of_adv_sets are externally visible */
8523 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8524 *instance = adv_instance->instance;
8525 instance++;
8526 } else {
8527 rp->num_instances--;
8528 rp_len--;
8529 }
8530 }
8531
8532 hci_dev_unlock(hdev);
8533
8534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8535 MGMT_STATUS_SUCCESS, rp, rp_len);
8536
8537 kfree(rp);
8538
8539 return err;
8540 }
8541
8542 static u8 calculate_name_len(struct hci_dev *hdev)
8543 {
8544 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8545
8546 return eir_append_local_name(hdev, buf, 0);
8547 }
8548
8549 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8550 bool is_adv_data)
8551 {
8552 u8 max_len = max_adv_len(hdev);
8553
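/* Each field the kernel reserves costs len + type + value bytes: Flags
 * and TX Power are 3 bytes each, Appearance is 4, and the local name
 * costs whatever eir_append_local_name() would emit.
 */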
8554 if (is_adv_data) {
8555 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8556 MGMT_ADV_FLAG_LIMITED_DISCOV |
8557 MGMT_ADV_FLAG_MANAGED_FLAGS))
8558 max_len -= 3;
8559
8560 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8561 max_len -= 3;
8562 } else {
8563 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8564 max_len -= calculate_name_len(hdev);
8565
8566 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8567 max_len -= 4;
8568 }
8569
8570 return max_len;
8571 }
8572
8573 static bool flags_managed(u32 adv_flags)
8574 {
8575 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8576 MGMT_ADV_FLAG_LIMITED_DISCOV |
8577 MGMT_ADV_FLAG_MANAGED_FLAGS);
8578 }
8579
8580 static bool tx_power_managed(u32 adv_flags)
8581 {
8582 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8583 }
8584
8585 static bool name_managed(u32 adv_flags)
8586 {
8587 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8588 }
8589
8590 static bool appearance_managed(u32 adv_flags)
8591 {
8592 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8593 }
8594
8595 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8596 u8 len, bool is_adv_data)
8597 {
8598 int i, cur_len;
8599 u8 max_len;
8600
8601 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8602
8603 if (len > max_len)
8604 return false;
8605
8606 /* Make sure that the data is correctly formatted. */
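/* E.g. the three bytes 02 01 06 form one field: length 0x02, type
 * EIR_FLAGS (0x01) and a single value byte 0x06.
 */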
8607 for (i = 0; i < len; i += (cur_len + 1)) {
8608 cur_len = data[i];
8609
8610 if (!cur_len)
8611 continue;
8612
8613 if (data[i + 1] == EIR_FLAGS &&
8614 (!is_adv_data || flags_managed(adv_flags)))
8615 return false;
8616
8617 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8618 return false;
8619
8620 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8621 return false;
8622
8623 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8624 return false;
8625
8626 if (data[i + 1] == EIR_APPEARANCE &&
8627 appearance_managed(adv_flags))
8628 return false;
8629
8630 /* If the current field length would exceed the total data
8631 * length, then it's invalid.
8632 */
8633 if (i + cur_len >= len)
8634 return false;
8635 }
8636
8637 return true;
8638 }
8639
8640 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8641 {
8642 u32 supported_flags, phy_flags;
8643
8644 /* The current implementation only supports a subset of the specified
8645 * flags. Also need to check mutual exclusiveness of sec flags.
8646 */
8647 supported_flags = get_supported_adv_flags(hdev);
8648 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
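/* (phy_flags & -phy_flags) isolates the lowest set bit, so the XOR
 * below is non-zero whenever more than one secondary PHY flag is set
 * at once.
 */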
8649 if (adv_flags & ~supported_flags ||
8650 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8651 return false;
8652
8653 return true;
8654 }
8655
8656 static bool adv_busy(struct hci_dev *hdev)
8657 {
8658 return pending_find(MGMT_OP_SET_LE, hdev);
8659 }
8660
8661 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8662 int err)
8663 {
8664 struct adv_info *adv, *n;
8665
8666 bt_dev_dbg(hdev, "err %d", err);
8667
8668 hci_dev_lock(hdev);
8669
8670 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8671 u8 instance;
8672
8673 if (!adv->pending)
8674 continue;
8675
8676 if (!err) {
8677 adv->pending = false;
8678 continue;
8679 }
8680
8681 instance = adv->instance;
8682
8683 if (hdev->cur_adv_instance == instance)
8684 cancel_adv_timeout(hdev);
8685
8686 hci_remove_adv_instance(hdev, instance);
8687 mgmt_advertising_removed(sk, hdev, instance);
8688 }
8689
8690 hci_dev_unlock(hdev);
8691 }
8692
8693 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8694 {
8695 struct mgmt_pending_cmd *cmd = data;
8696 struct mgmt_cp_add_advertising *cp = cmd->param;
8697 struct mgmt_rp_add_advertising rp;
8698
8699 memset(&rp, 0, sizeof(rp));
8700
8701 rp.instance = cp->instance;
8702
8703 if (err)
8704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8705 mgmt_status(err));
8706 else
8707 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8708 mgmt_status(err), &rp, sizeof(rp));
8709
8710 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8711
8712 mgmt_pending_free(cmd);
8713 }
8714
8715 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8716 {
8717 struct mgmt_pending_cmd *cmd = data;
8718 struct mgmt_cp_add_advertising *cp = cmd->param;
8719
8720 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8721 }
8722
8723 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8724 void *data, u16 data_len)
8725 {
8726 struct mgmt_cp_add_advertising *cp = data;
8727 struct mgmt_rp_add_advertising rp;
8728 u32 flags;
8729 u8 status;
8730 u16 timeout, duration;
8731 unsigned int prev_instance_cnt;
8732 u8 schedule_instance = 0;
8733 struct adv_info *adv, *next_instance;
8734 int err;
8735 struct mgmt_pending_cmd *cmd;
8736
8737 bt_dev_dbg(hdev, "sock %p", sk);
8738
8739 status = mgmt_le_support(hdev);
8740 if (status)
8741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8742 status);
8743
8744 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8746 MGMT_STATUS_INVALID_PARAMS);
8747
8748 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8750 MGMT_STATUS_INVALID_PARAMS);
8751
8752 flags = __le32_to_cpu(cp->flags);
8753 timeout = __le16_to_cpu(cp->timeout);
8754 duration = __le16_to_cpu(cp->duration);
8755
8756 if (!requested_adv_flags_are_valid(hdev, flags))
8757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8758 MGMT_STATUS_INVALID_PARAMS);
8759
8760 hci_dev_lock(hdev);
8761
8762 if (timeout && !hdev_is_powered(hdev)) {
8763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8764 MGMT_STATUS_REJECTED);
8765 goto unlock;
8766 }
8767
8768 if (adv_busy(hdev)) {
8769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8770 MGMT_STATUS_BUSY);
8771 goto unlock;
8772 }
8773
8774 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8775 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8776 cp->scan_rsp_len, false)) {
8777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8778 MGMT_STATUS_INVALID_PARAMS);
8779 goto unlock;
8780 }
8781
8782 prev_instance_cnt = hdev->adv_instance_cnt;
8783
8784 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8785 cp->adv_data_len, cp->data,
8786 cp->scan_rsp_len,
8787 cp->data + cp->adv_data_len,
8788 timeout, duration,
8789 HCI_ADV_TX_POWER_NO_PREFERENCE,
8790 hdev->le_adv_min_interval,
8791 hdev->le_adv_max_interval, 0);
8792 if (IS_ERR(adv)) {
8793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8794 MGMT_STATUS_FAILED);
8795 goto unlock;
8796 }
8797
8798 /* Only trigger an advertising added event if a new instance was
8799 * actually added.
8800 */
8801 if (hdev->adv_instance_cnt > prev_instance_cnt)
8802 mgmt_advertising_added(sk, hdev, cp->instance);
8803
8804 if (hdev->cur_adv_instance == cp->instance) {
8805 /* If the currently advertised instance is being changed then
8806 * cancel the current advertising and schedule the next
8807 * instance. If there is only one instance then the overridden
8808 * advertising data will be visible right away.
8809 */
8810 cancel_adv_timeout(hdev);
8811
8812 next_instance = hci_get_next_instance(hdev, cp->instance);
8813 if (next_instance)
8814 schedule_instance = next_instance->instance;
8815 } else if (!hdev->adv_instance_timeout) {
8816 /* Immediately advertise the new instance if no other
8817 * instance is currently being advertised.
8818 */
8819 schedule_instance = cp->instance;
8820 }
8821
8822 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8823 * there is no instance to be advertised then we have no HCI
8824 * communication to make. Simply return.
8825 */
8826 if (!hdev_is_powered(hdev) ||
8827 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8828 !schedule_instance) {
8829 rp.instance = cp->instance;
8830 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8831 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8832 goto unlock;
8833 }
8834
8835 /* We're good to go, update advertising data, parameters, and start
8836 * advertising.
8837 */
8838 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8839 data_len);
8840 if (!cmd) {
8841 err = -ENOMEM;
8842 goto unlock;
8843 }
8844
8845 cp->instance = schedule_instance;
8846
8847 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8848 add_advertising_complete);
8849 if (err < 0)
8850 mgmt_pending_free(cmd);
8851
8852 unlock:
8853 hci_dev_unlock(hdev);
8854
8855 return err;
8856 }
8857
8858 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8859 int err)
8860 {
8861 struct mgmt_pending_cmd *cmd = data;
8862 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8863 struct mgmt_rp_add_ext_adv_params rp;
8864 struct adv_info *adv;
8865 u32 flags;
8866
8867 BT_DBG("%s", hdev->name);
8868
8869 hci_dev_lock(hdev);
8870
8871 adv = hci_find_adv_instance(hdev, cp->instance);
8872 if (!adv)
8873 goto unlock;
8874
8875 rp.instance = cp->instance;
8876 rp.tx_power = adv->tx_power;
8877
8878 /* While we're at it, inform userspace of the available space for this
8879 * advertisement, given the flags that will be used.
8880 */
8881 flags = __le32_to_cpu(cp->flags);
8882 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8883 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8884
8885 if (err) {
8886 /* If this advertisement was previously advertising and we
8887 * failed to update it, we signal that it has been removed and
8888 * delete its structure
8889 */
8890 if (!adv->pending)
8891 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8892
8893 hci_remove_adv_instance(hdev, cp->instance);
8894
8895 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8896 mgmt_status(err));
8897 } else {
8898 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8899 mgmt_status(err), &rp, sizeof(rp));
8900 }
8901
8902 unlock:
8903 mgmt_pending_free(cmd);
8904
8905 hci_dev_unlock(hdev);
8906 }
8907
8908 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8909 {
8910 struct mgmt_pending_cmd *cmd = data;
8911 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8912
8913 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8914 }
8915
8916 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8917 void *data, u16 data_len)
8918 {
8919 struct mgmt_cp_add_ext_adv_params *cp = data;
8920 struct mgmt_rp_add_ext_adv_params rp;
8921 struct mgmt_pending_cmd *cmd = NULL;
8922 struct adv_info *adv;
8923 u32 flags, min_interval, max_interval;
8924 u16 timeout, duration;
8925 u8 status;
8926 s8 tx_power;
8927 int err;
8928
8929 BT_DBG("%s", hdev->name);
8930
8931 status = mgmt_le_support(hdev);
8932 if (status)
8933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8934 status);
8935
8936 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8937 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8938 MGMT_STATUS_INVALID_PARAMS);
8939
8940 /* The purpose of breaking add_advertising into two separate MGMT calls
8941 * for params and data is to allow more parameters to be added to this
8942 * structure in the future. For this reason, we only verify that the
8943 * bare minimum structure, as defined when the interface was introduced,
8944 * is present. Any extra parameters we don't know about are ignored.
8945 */
8946 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8948 MGMT_STATUS_INVALID_PARAMS);
8949
8950 flags = __le32_to_cpu(cp->flags);
8951
8952 if (!requested_adv_flags_are_valid(hdev, flags))
8953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8954 MGMT_STATUS_INVALID_PARAMS);
8955
8956 hci_dev_lock(hdev);
8957
8958 /* In new interface, we require that we are powered to register */
8959 if (!hdev_is_powered(hdev)) {
8960 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8961 MGMT_STATUS_REJECTED);
8962 goto unlock;
8963 }
8964
8965 if (adv_busy(hdev)) {
8966 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8967 MGMT_STATUS_BUSY);
8968 goto unlock;
8969 }
8970
8971 /* Parse defined parameters from request, use defaults otherwise */
8972 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8973 __le16_to_cpu(cp->timeout) : 0;
8974
8975 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8976 __le16_to_cpu(cp->duration) :
8977 hdev->def_multi_adv_rotation_duration;
8978
8979 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8980 __le32_to_cpu(cp->min_interval) :
8981 hdev->le_adv_min_interval;
8982
8983 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8984 __le32_to_cpu(cp->max_interval) :
8985 hdev->le_adv_max_interval;
8986
8987 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8988 cp->tx_power :
8989 HCI_ADV_TX_POWER_NO_PREFERENCE;
8990
8991 /* Create advertising instance with no advertising or response data */
8992 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8993 timeout, duration, tx_power, min_interval,
8994 max_interval, 0);
8995
8996 if (IS_ERR(adv)) {
8997 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8998 MGMT_STATUS_FAILED);
8999 goto unlock;
9000 }
9001
9002 /* Submit request for advertising params if ext adv available */
9003 if (ext_adv_capable(hdev)) {
9004 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
9005 data, data_len);
9006 if (!cmd) {
9007 err = -ENOMEM;
9008 hci_remove_adv_instance(hdev, cp->instance);
9009 goto unlock;
9010 }
9011
9012 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9013 add_ext_adv_params_complete);
9014 if (err < 0)
9015 mgmt_pending_free(cmd);
9016 } else {
9017 rp.instance = cp->instance;
9018 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9019 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9020 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9021 err = mgmt_cmd_complete(sk, hdev->id,
9022 MGMT_OP_ADD_EXT_ADV_PARAMS,
9023 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9024 }
9025
9026 unlock:
9027 hci_dev_unlock(hdev);
9028
9029 return err;
9030 }
9031
9032 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9033 {
9034 struct mgmt_pending_cmd *cmd = data;
9035 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9036 struct mgmt_rp_add_advertising rp;
9037
9038 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9039
9040 memset(&rp, 0, sizeof(rp));
9041
9042 rp.instance = cp->instance;
9043
9044 if (err)
9045 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9046 mgmt_status(err));
9047 else
9048 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9049 mgmt_status(err), &rp, sizeof(rp));
9050
9051 mgmt_pending_free(cmd);
9052 }
9053
9054 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9055 {
9056 struct mgmt_pending_cmd *cmd = data;
9057 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9058 int err;
9059
9060 if (ext_adv_capable(hdev)) {
9061 err = hci_update_adv_data_sync(hdev, cp->instance);
9062 if (err)
9063 return err;
9064
9065 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9066 if (err)
9067 return err;
9068
9069 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9070 }
9071
9072 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9073 }
9074
9075 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9076 u16 data_len)
9077 {
9078 struct mgmt_cp_add_ext_adv_data *cp = data;
9079 struct mgmt_rp_add_ext_adv_data rp;
9080 u8 schedule_instance = 0;
9081 struct adv_info *next_instance;
9082 struct adv_info *adv_instance;
9083 int err = 0;
9084 struct mgmt_pending_cmd *cmd;
9085
9086 BT_DBG("%s", hdev->name);
9087
9088 hci_dev_lock(hdev);
9089
9090 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9091
9092 if (!adv_instance) {
9093 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9094 MGMT_STATUS_INVALID_PARAMS);
9095 goto unlock;
9096 }
9097
9098 /* In new interface, we require that we are powered to register */
9099 if (!hdev_is_powered(hdev)) {
9100 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9101 MGMT_STATUS_REJECTED);
9102 goto clear_new_instance;
9103 }
9104
9105 if (adv_busy(hdev)) {
9106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9107 MGMT_STATUS_BUSY);
9108 goto clear_new_instance;
9109 }
9110
9111 /* Validate new data */
9112 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9113 cp->adv_data_len, true) ||
9114 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9115 cp->adv_data_len, cp->scan_rsp_len, false)) {
9116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9117 MGMT_STATUS_INVALID_PARAMS);
9118 goto clear_new_instance;
9119 }
9120
9121 /* Set the data in the advertising instance */
9122 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9123 cp->data, cp->scan_rsp_len,
9124 cp->data + cp->adv_data_len);
9125
9126 /* If using software rotation, determine next instance to use */
9127 if (hdev->cur_adv_instance == cp->instance) {
9128 /* If the currently advertised instance is being changed
9129 * then cancel the current advertising and schedule the
9130 * next instance. If there is only one instance then the
9131 * overridden advertising data will be visible right
9132 * away
9133 */
9134 cancel_adv_timeout(hdev);
9135
9136 next_instance = hci_get_next_instance(hdev, cp->instance);
9137 if (next_instance)
9138 schedule_instance = next_instance->instance;
9139 } else if (!hdev->adv_instance_timeout) {
9140 /* Immediately advertise the new instance if no other
9141 * instance is currently being advertised.
9142 */
9143 schedule_instance = cp->instance;
9144 }
9145
9146 /* If the HCI_ADVERTISING flag is set or there is no instance to
9147 * be advertised then we have no HCI communication to make.
9148 * Simply return.
9149 */
9150 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9151 if (adv_instance->pending) {
9152 mgmt_advertising_added(sk, hdev, cp->instance);
9153 adv_instance->pending = false;
9154 }
9155 rp.instance = cp->instance;
9156 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9157 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9158 goto unlock;
9159 }
9160
9161 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9162 data_len);
9163 if (!cmd) {
9164 err = -ENOMEM;
9165 goto clear_new_instance;
9166 }
9167
9168 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9169 add_ext_adv_data_complete);
9170 if (err < 0) {
9171 mgmt_pending_free(cmd);
9172 goto clear_new_instance;
9173 }
9174
9175 /* We were successful in updating data, so trigger advertising_added
9176 * event if this is an instance that wasn't previously advertising. If
9177 * a failure occurs in the requests we initiated, we will remove the
9178 * instance again in add_advertising_complete
9179 */
9180 if (adv_instance->pending)
9181 mgmt_advertising_added(sk, hdev, cp->instance);
9182
9183 goto unlock;
9184
9185 clear_new_instance:
9186 hci_remove_adv_instance(hdev, cp->instance);
9187
9188 unlock:
9189 hci_dev_unlock(hdev);
9190
9191 return err;
9192 }
9193
9194 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9195 int err)
9196 {
9197 struct mgmt_pending_cmd *cmd = data;
9198 struct mgmt_cp_remove_advertising *cp = cmd->param;
9199 struct mgmt_rp_remove_advertising rp;
9200
9201 bt_dev_dbg(hdev, "err %d", err);
9202
9203 memset(&rp, 0, sizeof(rp));
9204 rp.instance = cp->instance;
9205
9206 if (err)
9207 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9208 mgmt_status(err));
9209 else
9210 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9211 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9212
9213 mgmt_pending_free(cmd);
9214 }
9215
9216 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9217 {
9218 struct mgmt_pending_cmd *cmd = data;
9219 struct mgmt_cp_remove_advertising *cp = cmd->param;
9220 int err;
9221
9222 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9223 if (err)
9224 return err;
9225
9226 if (list_empty(&hdev->adv_instances))
9227 err = hci_disable_advertising_sync(hdev);
9228
9229 return err;
9230 }
9231
9232 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9233 void *data, u16 data_len)
9234 {
9235 struct mgmt_cp_remove_advertising *cp = data;
9236 struct mgmt_pending_cmd *cmd;
9237 int err;
9238
9239 bt_dev_dbg(hdev, "sock %p", sk);
9240
9241 hci_dev_lock(hdev);
9242
9243 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9244 err = mgmt_cmd_status(sk, hdev->id,
9245 MGMT_OP_REMOVE_ADVERTISING,
9246 MGMT_STATUS_INVALID_PARAMS);
9247 goto unlock;
9248 }
9249
9250 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9251 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9252 MGMT_STATUS_BUSY);
9253 goto unlock;
9254 }
9255
9256 if (list_empty(&hdev->adv_instances)) {
9257 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9258 MGMT_STATUS_INVALID_PARAMS);
9259 goto unlock;
9260 }
9261
9262 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9263 data_len);
9264 if (!cmd) {
9265 err = -ENOMEM;
9266 goto unlock;
9267 }
9268
9269 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9270 remove_advertising_complete);
9271 if (err < 0)
9272 mgmt_pending_free(cmd);
9273
9274 unlock:
9275 hci_dev_unlock(hdev);
9276
9277 return err;
9278 }
9279
9280 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9281 void *data, u16 data_len)
9282 {
9283 struct mgmt_cp_get_adv_size_info *cp = data;
9284 struct mgmt_rp_get_adv_size_info rp;
9285 u32 flags, supported_flags;
9286
9287 bt_dev_dbg(hdev, "sock %p", sk);
9288
9289 if (!lmp_le_capable(hdev))
9290 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9291 MGMT_STATUS_REJECTED);
9292
9293 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9295 MGMT_STATUS_INVALID_PARAMS);
9296
9297 flags = __le32_to_cpu(cp->flags);
9298
9299 /* The current implementation only supports a subset of the specified
9300 * flags.
9301 */
9302 supported_flags = get_supported_adv_flags(hdev);
9303 if (flags & ~supported_flags)
9304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9305 MGMT_STATUS_INVALID_PARAMS);
9306
9307 rp.instance = cp->instance;
9308 rp.flags = cp->flags;
9309 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9310 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9311
9312 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9313 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9314 }
9315
9316 static const struct hci_mgmt_handler mgmt_handlers[] = {
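/* Indexed by MGMT opcode, so entries must stay in opcode order; the
 * second column is the expected (minimum, for HCI_MGMT_VAR_LEN)
 * request size.
 */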
9317 { NULL }, /* 0x0000 (no command) */
9318 { read_version, MGMT_READ_VERSION_SIZE,
9319 HCI_MGMT_NO_HDEV |
9320 HCI_MGMT_UNTRUSTED },
9321 { read_commands, MGMT_READ_COMMANDS_SIZE,
9322 HCI_MGMT_NO_HDEV |
9323 HCI_MGMT_UNTRUSTED },
9324 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9325 HCI_MGMT_NO_HDEV |
9326 HCI_MGMT_UNTRUSTED },
9327 { read_controller_info, MGMT_READ_INFO_SIZE,
9328 HCI_MGMT_UNTRUSTED },
9329 { set_powered, MGMT_SETTING_SIZE },
9330 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9331 { set_connectable, MGMT_SETTING_SIZE },
9332 { set_fast_connectable, MGMT_SETTING_SIZE },
9333 { set_bondable, MGMT_SETTING_SIZE },
9334 { set_link_security, MGMT_SETTING_SIZE },
9335 { set_ssp, MGMT_SETTING_SIZE },
9336 { set_hs, MGMT_SETTING_SIZE },
9337 { set_le, MGMT_SETTING_SIZE },
9338 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9339 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9340 { add_uuid, MGMT_ADD_UUID_SIZE },
9341 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9342 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9343 HCI_MGMT_VAR_LEN },
9344 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9345 HCI_MGMT_VAR_LEN },
9346 { disconnect, MGMT_DISCONNECT_SIZE },
9347 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9348 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9349 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9350 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9351 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9352 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9353 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9354 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9355 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9356 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9357 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9358 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9359 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9360 HCI_MGMT_VAR_LEN },
9361 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9362 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9363 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9364 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9365 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9366 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9367 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9368 { set_advertising, MGMT_SETTING_SIZE },
9369 { set_bredr, MGMT_SETTING_SIZE },
9370 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9371 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9372 { set_secure_conn, MGMT_SETTING_SIZE },
9373 { set_debug_keys, MGMT_SETTING_SIZE },
9374 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9375 { load_irks, MGMT_LOAD_IRKS_SIZE,
9376 HCI_MGMT_VAR_LEN },
9377 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9378 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9379 { add_device, MGMT_ADD_DEVICE_SIZE },
9380 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9381 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9382 HCI_MGMT_VAR_LEN },
9383 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9384 HCI_MGMT_NO_HDEV |
9385 HCI_MGMT_UNTRUSTED },
9386 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9387 HCI_MGMT_UNCONFIGURED |
9388 HCI_MGMT_UNTRUSTED },
9389 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9390 HCI_MGMT_UNCONFIGURED },
9391 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9392 HCI_MGMT_UNCONFIGURED },
9393 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9394 HCI_MGMT_VAR_LEN },
9395 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9396 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9397 HCI_MGMT_NO_HDEV |
9398 HCI_MGMT_UNTRUSTED },
9399 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9400 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9401 HCI_MGMT_VAR_LEN },
9402 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9403 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9404 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9405 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9406 HCI_MGMT_UNTRUSTED },
9407 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9408 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9409 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9410 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9411 HCI_MGMT_VAR_LEN },
9412 { set_wideband_speech, MGMT_SETTING_SIZE },
9413 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9414 HCI_MGMT_UNTRUSTED },
9415 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9416 HCI_MGMT_UNTRUSTED |
9417 HCI_MGMT_HDEV_OPTIONAL },
9418 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9419 HCI_MGMT_VAR_LEN |
9420 HCI_MGMT_HDEV_OPTIONAL },
9421 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9422 HCI_MGMT_UNTRUSTED },
9423 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9424 HCI_MGMT_VAR_LEN },
9425 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9426 HCI_MGMT_UNTRUSTED },
9427 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9428 HCI_MGMT_VAR_LEN },
9429 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9430 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9431 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9432 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9433 HCI_MGMT_VAR_LEN },
9434 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9435 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9436 HCI_MGMT_VAR_LEN },
9437 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9438 HCI_MGMT_VAR_LEN },
9439 { add_adv_patterns_monitor_rssi,
9440 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9441 HCI_MGMT_VAR_LEN },
9442 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9443 HCI_MGMT_VAR_LEN },
9444 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9445 { mesh_send, MGMT_MESH_SEND_SIZE,
9446 HCI_MGMT_VAR_LEN },
9447 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9448 };
9449
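/* Notify userspace that a new controller index has appeared. Unconfigured
 * controllers are announced with MGMT_EV_UNCONF_INDEX_ADDED and configured
 * ones with MGMT_EV_INDEX_ADDED. In both cases an extended event carrying
 * the index type and bus is sent to sockets subscribed to extended index
 * events.
 */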
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

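/* Called once the power-on sequence has finished. On success, re-arm the
 * stored LE connection actions and passive scanning before completing any
 * pending MGMT_OP_SET_POWERED commands and emitting New Settings.
 */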
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let's use
	 * the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

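/* Forward a newly created BR/EDR link key to userspace. The store_hint
 * tells userspace whether the key should be persisted across reboots.
 */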
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

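/* Map the internal SMP LTK type to the key type constants exposed over the
 * management interface, folding the authenticated/unauthenticated state of
 * legacy and P-256 keys into distinct values.
 */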
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote identity
	 * is known, the long term keys are internally mapped to the
	 * identity address. So allow static random and public addresses
	 * here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their signature resolving keys to be stored. Their addresses
	 * will change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the signature resolving key be stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

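/* Emit MGMT_EV_DEVICE_CONNECTED exactly once per connection: the
 * HCI_CONN_MGMT_CONNECTED flag guards against duplicate events. The event
 * carries either the LE advertising data or, for BR/EDR, a locally built
 * EIR blob with the remote name and class of device.
 */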
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

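/* Return true if the controller is in the middle of powering down, either
 * because HCI_POWERING_DOWN is already set or because a pending
 * MGMT_OP_SET_POWERED command is about to switch the controller off.
 */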
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

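/* Common completion handler for the four user confirm/passkey reply
 * commands: look up the pending command for the given opcode and finish it
 * with the translated HCI status.
 */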
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

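/* Walk the EIR/advertising data and check whether any advertised service
 * UUID matches the discovery filter. 16-bit and 32-bit UUIDs are expanded
 * into full 128-bit UUIDs by patching them into the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB); since the values are stored
 * little-endian, a 16-bit UUID such as 0x110b ends up in bytes 12 (0x0b)
 * and 13 (0x11) of the 128-bit value.
 */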
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

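/* Apply the Start Service Discovery filter to a single result: drop it if
 * it fails the RSSI threshold (unless the strict duplicate filter quirk
 * requires letting it through) or if none of its UUIDs match the filter.
 */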
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

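/* Forward an advertising report to the mesh receiver. If a list of mesh AD
 * types was configured, the report is only accepted when the advertising
 * or scan response data contains at least one matching AD type.
 */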
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

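/* Central entry point for inquiry results and advertising reports. Applies
 * the discovery, service-discovery and limited-discovery filters, builds
 * the MGMT_EV_DEVICE_FOUND event and hands it to the advertisement monitor
 * logic, which decides which sockets actually receive it.
 */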
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE, one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

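/* Registration of the management channel: every command received on an
 * HCI_CHANNEL_CONTROL socket is dispatched through the mgmt_handlers table
 * above, indexed by opcode.
 */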
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}