• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
/* Management interface version reported to userspace via
 * MGMT_OP_READ_VERSION.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	18

/* Opcodes a trusted (privileged) management socket may invoke. This
 * list is copied verbatim into the MGMT_OP_READ_COMMANDS reply, so
 * its contents and order are userspace visible.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126 
127 static const u16 mgmt_events[] = {
128 	MGMT_EV_CONTROLLER_ERROR,
129 	MGMT_EV_INDEX_ADDED,
130 	MGMT_EV_INDEX_REMOVED,
131 	MGMT_EV_NEW_SETTINGS,
132 	MGMT_EV_CLASS_OF_DEV_CHANGED,
133 	MGMT_EV_LOCAL_NAME_CHANGED,
134 	MGMT_EV_NEW_LINK_KEY,
135 	MGMT_EV_NEW_LONG_TERM_KEY,
136 	MGMT_EV_DEVICE_CONNECTED,
137 	MGMT_EV_DEVICE_DISCONNECTED,
138 	MGMT_EV_CONNECT_FAILED,
139 	MGMT_EV_PIN_CODE_REQUEST,
140 	MGMT_EV_USER_CONFIRM_REQUEST,
141 	MGMT_EV_USER_PASSKEY_REQUEST,
142 	MGMT_EV_AUTH_FAILED,
143 	MGMT_EV_DEVICE_FOUND,
144 	MGMT_EV_DISCOVERING,
145 	MGMT_EV_DEVICE_BLOCKED,
146 	MGMT_EV_DEVICE_UNBLOCKED,
147 	MGMT_EV_DEVICE_UNPAIRED,
148 	MGMT_EV_PASSKEY_NOTIFY,
149 	MGMT_EV_NEW_IRK,
150 	MGMT_EV_NEW_CSRK,
151 	MGMT_EV_DEVICE_ADDED,
152 	MGMT_EV_DEVICE_REMOVED,
153 	MGMT_EV_NEW_CONN_PARAM,
154 	MGMT_EV_UNCONF_INDEX_ADDED,
155 	MGMT_EV_UNCONF_INDEX_REMOVED,
156 	MGMT_EV_NEW_CONFIG_OPTIONS,
157 	MGMT_EV_EXT_INDEX_ADDED,
158 	MGMT_EV_EXT_INDEX_REMOVED,
159 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
160 	MGMT_EV_ADVERTISING_ADDED,
161 	MGMT_EV_ADVERTISING_REMOVED,
162 	MGMT_EV_EXT_INFO_CHANGED,
163 	MGMT_EV_PHY_CONFIGURATION_CHANGED,
164 	MGMT_EV_EXP_FEATURE_CHANGED,
165 	MGMT_EV_DEVICE_FLAGS_CHANGED,
166 	MGMT_EV_CONTROLLER_SUSPEND,
167 	MGMT_EV_CONTROLLER_RESUME,
168 };
169 
170 static const u16 mgmt_untrusted_commands[] = {
171 	MGMT_OP_READ_INDEX_LIST,
172 	MGMT_OP_READ_INFO,
173 	MGMT_OP_READ_UNCONF_INDEX_LIST,
174 	MGMT_OP_READ_CONFIG_INFO,
175 	MGMT_OP_READ_EXT_INDEX_LIST,
176 	MGMT_OP_READ_EXT_INFO,
177 	MGMT_OP_READ_SECURITY_INFO,
178 	MGMT_OP_READ_EXP_FEATURES_INFO,
179 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
180 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
181 };
182 
183 static const u16 mgmt_untrusted_events[] = {
184 	MGMT_EV_INDEX_ADDED,
185 	MGMT_EV_INDEX_REMOVED,
186 	MGMT_EV_NEW_SETTINGS,
187 	MGMT_EV_CLASS_OF_DEV_CHANGED,
188 	MGMT_EV_LOCAL_NAME_CHANGED,
189 	MGMT_EV_UNCONF_INDEX_ADDED,
190 	MGMT_EV_UNCONF_INDEX_REMOVED,
191 	MGMT_EV_NEW_CONFIG_OPTIONS,
192 	MGMT_EV_EXT_INDEX_ADDED,
193 	MGMT_EV_EXT_INDEX_REMOVED,
194 	MGMT_EV_EXT_INFO_CHANGED,
195 	MGMT_EV_EXP_FEATURE_CHANGED,
196 	MGMT_EV_ADV_MONITOR_ADDED,
197 	MGMT_EV_ADV_MONITOR_REMOVED,
198 };
199 
/* How long the service cache stays valid before service_cache_off()
 * refreshes EIR and class of device (2 seconds).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key, used to detect unset/blank key material */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code; the trailing comment on each entry names the HCI
 * error being translated. Codes past the end of the table map to
 * MGMT_STATUS_FAILED (see mgmt_status()).
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
272 
/* Translate an HCI status code into its MGMT counterpart. HCI codes
 * beyond the end of the conversion table collapse into the generic
 * MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	return (hci_status < ARRAY_SIZE(mgmt_status_table)) ?
		mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}
280 
/* Send @event for @hdev on the control channel to every socket whose
 * flags match @flag; no socket is excluded (skip_sk is NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
287 
/* Like mgmt_index_event(), but additionally skips delivery to
 * @skip_sk (typically the socket that triggered the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
294 
/* Send @event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
301 
/* Map an MGMT address type onto the HCI LE address type. Only the
 * public type is special-cased; every other value is treated as a
 * random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
309 
/* Fill a mgmt_rp_read_version structure (passed as a raw buffer by
 * callers) with the compiled-in interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
317 
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision. @data/@data_len are unused for this opcode.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
330 
/* MGMT_OP_READ_COMMANDS handler: reply with the opcode and event
 * tables appropriate for the socket's trust level. The reply is
 * num_commands little-endian opcodes followed by num_events
 * little-endian event codes.
 *
 * Returns 0 on success or -ENOMEM if the reply cannot be allocated.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the trusted or untrusted tables once up front */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes may land unaligned in the packed reply, hence
	 * put_unaligned_le16() rather than direct stores.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
382 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * Two passes are made over hci_dev_list under the same read_lock: the
 * first sizes the allocation, the second (with extra filtering) fills
 * it, so the final count can only be <= the allocated count.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a 16-bit little-endian value */
	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up/configured or
		 * claimed exclusively through a user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the (possibly smaller)
	 * second-pass count.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
442 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same shape as
 * read_index_list(), but lists primary controllers that still have
 * the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, with extra filtering */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
502 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list all primary and AMP
 * controllers with a per-entry type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and bus identifier.
 *
 * As a side effect, switches the calling socket over to extended
 * index events exclusively.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries, with extra filtering */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
576 
is_configured(struct hci_dev * hdev)577 static bool is_configured(struct hci_dev *hdev)
578 {
579 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
581 		return false;
582 
583 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
586 		return false;
587 
588 	return true;
589 }
590 
/* Return, as a little-endian bitmask, the configuration options that
 * are still outstanding for @hdev: external configuration and/or a
 * public address. A result of 0 means the controller is configured
 * (mirrors is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
606 
/* Broadcast a New Config Options event carrying the current missing
 * options, to sockets with option events enabled, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
614 
/* Complete @opcode for @sk with the current missing-options bitmask
 * as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
622 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer, which
 * configuration options the controller supports, and which of those
 * are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
650 
/* Build the bitmask of PHYs this controller supports, derived from
 * its LMP/LE feature bits. EDR PHYs are nested under the 2M check
 * because 2M EDR capability gates all EDR support; 3M and multi-slot
 * variants require their own feature bits on top.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
702 
/* Build the bitmask of currently selected PHYs. For BR/EDR this is
 * derived from hdev->pkt_type; note EDR packet-type bits are
 * inverted ("EDR shall not be used"), so a cleared bit means the EDR
 * PHY is selected. For LE it comes from the default TX/RX PHY masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type are "not used" flags */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
765 
get_configurable_phys(struct hci_dev * hdev)766 static u32 get_configurable_phys(struct hci_dev *hdev)
767 {
768 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
770 }
771 
/* Build the bitmask of settings this controller could support, based
 * on its capabilities (not on what is currently enabled — see
 * get_current_settings() for that).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These settings are always available */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs kernel support */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
823 
/* Build the bitmask of settings that are currently active on @hdev,
 * mostly by mirroring the corresponding hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
894 
/* Look up a pending management command for @opcode on the control
 * channel of @hdev; NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
899 
/* Like pending_find(), but additionally matches the command's
 * user_data pointer against @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
906 
/* Return the LE advertising flags (LE_AD_GENERAL, LE_AD_LIMITED or 0)
 * matching the controller's discoverable state.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		/* Limited takes precedence over general */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
930 
/* Return whether the controller is (or is about to become)
 * connectable, preferring the value from a pending Set Connectable
 * command over the current flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
947 
/* Delayed-work callback (hdev->service_cache): once the service cache
 * expires, push updated EIR data and class of device to the
 * controller. Does nothing if the cache flag was already cleared.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the request outside the device lock */
	hci_req_run(&req, NULL);
}
968 
/* Delayed work run when the resolvable private address (RPA) lifetime
 * elapses: mark the RPA as expired and, if advertising is active,
 * restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Without active advertising there is nothing to refresh now;
	 * the flag above makes the next user regenerate the RPA.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
993 
/* One-time per-device mgmt initialization, performed the first time a
 * mgmt socket touches the controller (guarded by the HCI_MGMT flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized if the flag was set before */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1009 
/* Handler for the Read Controller Information mgmt command: fill a
 * mgmt_rp_read_info reply with the controller's address, version,
 * settings, class and names, and send it back to the requester.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot all fields consistently under the dev lock */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1039 
append_eir_data_to_buf(struct hci_dev * hdev,u8 * eir)1040 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1041 {
1042 	u16 eir_len = 0;
1043 	size_t name_len;
1044 
1045 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1046 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1047 					  hdev->dev_class, 3);
1048 
1049 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1050 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1051 					  hdev->appearance);
1052 
1053 	name_len = strlen(hdev->dev_name);
1054 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1055 				  hdev->dev_name, name_len);
1056 
1057 	name_len = strlen(hdev->short_name);
1058 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1059 				  hdev->short_name, name_len);
1060 
1061 	return eir_len;
1062 }
1063 
/* Handler for the Read Extended Controller Information mgmt command.
 * Builds the fixed reply header plus a variable-length EIR blob in a
 * stack buffer and returns only the used portion.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes covers the fixed header plus the bounded EIR data
	 * (CoD, appearance and the size-limited device names).
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1103 
ext_info_changed(struct hci_dev * hdev,struct sock * skip)1104 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1105 {
1106 	char buf[512];
1107 	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1108 	u16 eir_len;
1109 
1110 	memset(buf, 0, sizeof(buf));
1111 
1112 	eir_len = append_eir_data_to_buf(hdev, ev->eir);
1113 	ev->eir_len = cpu_to_le16(eir_len);
1114 
1115 	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1116 				  sizeof(*ev) + eir_len,
1117 				  HCI_MGMT_EXT_INFO_EVENTS, skip);
1118 }
1119 
/* Send a successful command-complete reply carrying the controller's
 * current settings bitmask (little-endian) for the given opcode.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1127 
/* Completion callback for the power-off cleanup request: once all
 * connections are gone, schedule the actual power-off work immediately
 * instead of waiting for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1137 
/* Notify mgmt sockets (except @sk) that advertising instance @instance
 * was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1146 
/* Notify mgmt sockets (except @sk) that advertising instance @instance
 * was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1156 
cancel_adv_timeout(struct hci_dev * hdev)1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 {
1159 	if (hdev->adv_instance_timeout) {
1160 		hdev->adv_instance_timeout = 0;
1161 		cancel_delayed_work(&hdev->adv_instance_expire);
1162 	}
1163 }
1164 
/* Queue the HCI commands needed to wind the controller down before a
 * power off: disable scanning, stop advertising, stop discovery and
 * abort every open connection. Returns the result of hci_req_run()
 * (-ENODATA means nothing needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (no instance re-enable) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1198 
/* Handler for the Set Powered mgmt command: power the controller on or
 * off. Power-off first runs the HCI cleanup sequence and falls back to
 * an immediate power-off when no HCI commands were needed.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already current: reply with settings, no work */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1253 
/* Broadcast a New Settings event with the current settings bitmask to
 * all interested mgmt sockets, optionally skipping @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1261 
/* Public wrapper: broadcast New Settings to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1266 
/* Context handed to mgmt_pending_foreach() callbacks below */
struct cmd_lookup {
	struct sock *sk;	/* first matched socket (reference held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1272 
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, remember (and hold) the first socket in the lookup
 * context, and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Detach from the pending list before freeing */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Caller is responsible for the matching sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1288 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1296 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1298 {
1299 	if (cmd->cmd_complete) {
1300 		u8 *status = data;
1301 
1302 		cmd->cmd_complete(cmd, *status);
1303 		mgmt_pending_remove(cmd);
1304 
1305 		return;
1306 	}
1307 
1308 	cmd_status_rsp(cmd, data);
1309 }
1310 
/* Generic cmd_complete handler: echo the full original command
 * parameters back in the reply.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1316 
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1322 
mgmt_bredr_support(struct hci_dev * hdev)1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1324 {
1325 	if (!lmp_bredr_capable(hdev))
1326 		return MGMT_STATUS_NOT_SUPPORTED;
1327 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 		return MGMT_STATUS_REJECTED;
1329 	else
1330 		return MGMT_STATUS_SUCCESS;
1331 }
1332 
mgmt_le_support(struct hci_dev * hdev)1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1334 {
1335 	if (!lmp_le_capable(hdev))
1336 		return MGMT_STATUS_NOT_SUPPORTED;
1337 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 		return MGMT_STATUS_REJECTED;
1339 	else
1340 		return MGMT_STATUS_SUCCESS;
1341 }
1342 
/* HCI completion hook for a Set Discoverable request: on success arm
 * the discoverable timeout (if any) and answer the pending mgmt
 * command; on failure report the error and roll back the limited
 * discoverable flag.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable timeout now that the mode is active */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1377 
/* Handler for the Set Discoverable mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the discoverable period in seconds.
 * Depending on the current state this either updates flags directly
 * (powered off / timeout-only change) or queues the discoverable
 * update work and defers the reply to the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only valid on top of connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	/* Actual HCI traffic happens from the workqueue; the reply is
	 * sent from mgmt_set_discoverable_complete().
	 */
	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1507 
/* HCI completion hook for a Set Connectable request: answer the pending
 * mgmt command with either an error status or the new settings.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1535 
/* Apply a connectable change purely at the flag level (used while the
 * controller is powered off), reply with the settings and, if anything
 * changed, refresh scanning state and broadcast New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = !!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE);
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable implies dropping discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_req_update_scan(hdev);
	hci_update_background_scan(hdev);

	return new_settings(hdev, sk);
}
1564 
/* Handler for the Set Connectable mgmt command. When powered off the
 * change is applied at flag level only; otherwise the connectable
 * update work is queued and the reply comes from
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Going non-connectable also tears down discoverable
		 * mode and its pending timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1621 
/* Handler for the Set Bondable mgmt command: toggle the HCI_BONDABLE
 * flag and, in limited privacy mode, refresh the advertising state
 * since the local advertising address may depend on bondable mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1664 
/* Handler for the Set Link Security mgmt command: toggle BR/EDR link
 * level security (authentication). When powered, this sends
 * HCI_OP_WRITE_AUTH_ENABLE and the reply is deferred to the HCI event
 * handler; when powered off only the flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: nothing to do */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1733 
/* Handler for the Set Secure Simple Pairing mgmt command. When powered
 * off the SSP (and dependent HS) flags are updated directly; when
 * powered, HCI_OP_WRITE_SSP_MODE is sent and the reply is deferred to
 * the HCI event handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed; the
			 * HS flag change alone counts as a change too.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1814 
/* Handler for the Set High Speed mgmt command: toggle the HCI_HS_ENABLED
 * flag. HS is a host-side setting layered on SSP, so no HCI traffic is
 * needed; disabling it while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with an in-flight SSP change */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875 
/* HCI completion hook for a Set LE request: answer all pending SET_LE
 * commands, broadcast the new settings, and after a successful enable
 * refresh the default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* Reply to every pending SET_LE; match.sk gets the first socket */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1922 
/* Handler for the Set Low Energy mgmt command: enable or disable the LE
 * host support. When the controller is powered and the host state
 * actually changes, HCI_OP_WRITE_LE_HOST_SUPPORTED is sent and the
 * reply is deferred to le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE tears down all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or host LE support already matches: flag-level
	 * update only, no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight LE and advertising changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning LE host support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2029 
2030 /* This is a helper function to test for pending mgmt commands that can
2031  * cause CoD or EIR HCI commands. We can only allow one such pending
2032  * mgmt command at a time since otherwise we cannot easily track what
2033  * the current values are, will be, and based on that calculate if a new
2034  * HCI command needs to be sent and if yes with what value.
2035  */
pending_eir_or_class(struct hci_dev * hdev)2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2037 {
2038 	struct mgmt_pending_cmd *cmd;
2039 
2040 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 		switch (cmd->opcode) {
2042 		case MGMT_OP_ADD_UUID:
2043 		case MGMT_OP_REMOVE_UUID:
2044 		case MGMT_OP_SET_DEV_CLASS:
2045 		case MGMT_OP_SET_POWERED:
2046 			return true;
2047 		}
2048 	}
2049 
2050 	return false;
2051 }
2052 
2053 static const u8 bluetooth_base_uuid[] = {
2054 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2055 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2056 };
2057 
get_uuid_size(const u8 * uuid)2058 static u8 get_uuid_size(const u8 *uuid)
2059 {
2060 	u32 val;
2061 
2062 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2063 		return 128;
2064 
2065 	val = get_unaligned_le32(&uuid[12]);
2066 	if (val > 0xffff)
2067 		return 32;
2068 
2069 	return 16;
2070 }
2071 
/* Complete the pending mgmt command for @mgmt_op (if any) with the
 * current device class (3 bytes) as the response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (cmd) {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
2090 
/* HCI request completion callback for MGMT_OP_ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2097 
/* Handle MGMT_OP_ADD_UUID: register a service UUID and update the
 * Class of Device and EIR data to advertise it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued (nothing to
		 * send to the controller), so complete immediately.
		 */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	/* The request is in flight; reply comes via add_uuid_complete(). */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2155 
enable_service_cache(struct hci_dev * hdev)2156 static bool enable_service_cache(struct hci_dev *hdev)
2157 {
2158 	if (!hdev_is_powered(hdev))
2159 		return false;
2160 
2161 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2163 				   CACHE_TIMEOUT);
2164 		return true;
2165 	}
2166 
2167 	return false;
2168 }
2169 
/* HCI request completion callback for MGMT_OP_REMOVE_UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2176 
/* Handle MGMT_OP_REMOVE_UUID: drop one service UUID (or all of them
 * when the all-zero wildcard UUID is given) and refresh class/EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID means "remove every stored UUID". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was just armed, the controller
		 * update is deferred to that timer; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant since matching entries are deleted while walking. */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send to the controller, so the
		 * command can be completed right away.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply comes via remove_uuid_complete() once the request runs. */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2255 
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2262 
/* Handle MGMT_OP_SET_DEV_CLASS: store the major/minor device class and
 * push the change to the controller when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers. */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits must be zero. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just remember the values for the next power on. */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while synchronously cancelling the work
		 * item (NOTE(review): presumably the service cache work
		 * takes hdev->lock itself, so cancelling under the lock
		 * could deadlock — confirm against service_cache worker).
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands were queued, reply now. */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred to set_class_complete(). */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2333 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace all stored BR/EDR link keys
 * with the list supplied by userspace (typically at daemon startup).
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key count that still fits in the u16 payload length. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must contain exactly key_count entries. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before clearing the existing key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the admin blocklist are silently skipped. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2422 
/* Emit a Device Unpaired event for @bdaddr to all mgmt sockets except
 * @skip_sk (typically the socket that issued the unpair command).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2434 
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the stored keys for a device and
 * optionally terminate its active connection.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No live connection: drop the parameters right away. */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect in progress: reply is deferred until the link drops. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2562 
/* Handle MGMT_OP_DISCONNECT: tear down the ACL or LE link to the given
 * remote address. The reply is deferred until the link is actually gone.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED entries are not usable links. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2628 
/* Translate an HCI link type plus address type into the corresponding
 * mgmt BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
2647 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all mgmt
 * visible connections, excluding SCO/eSCO audio links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound on the number of reportable entries. */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses, filtering out SCO/eSCO links. */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[count].bdaddr, &c->dst);
		rp->addr[count].type = link_to_bdaddr(c->type, c->dst_type);
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2701 
/* Queue an HCI PIN Code Negative Reply for the given address and track
 * it as a pending mgmt command (completed via addr_cmd_complete).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2722 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; anything shorter is
	 * turned into a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2784 
/* Handle MGMT_OP_SET_IO_CAPABILITY: record the IO capability to be used
 * for subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;
	u8 capability = cp->io_capability;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2807 
find_pairing(struct hci_conn * conn)2808 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2809 {
2810 	struct hci_dev *hdev = conn->hdev;
2811 	struct mgmt_pending_cmd *cmd;
2812 
2813 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2814 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2815 			continue;
2816 
2817 		if (cmd->user_data != conn)
2818 			continue;
2819 
2820 		return cmd;
2821 	}
2822 
2823 	return NULL;
2824 }
2825 
/* Finalize a Pair Device command: send the reply, detach the pairing
 * callbacks and release the connection references taken at submit time.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken when the command was queued. */
	hci_conn_put(conn);

	return err;
}
2854 
/* SMP has finished for @conn: complete the owning Pair Device command
 * with success or failure accordingly.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2866 
/* Connection/security/disconnect callback used for BR/EDR pairing:
 * completes the owning Pair Device command with the HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2882 
/* LE variant of pairing_complete_cb(): only failures complete the
 * command here — success is reported through mgmt_smp_complete() once
 * SMP has actually finished.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (status) {
		cmd = find_pairing(conn);
		if (cmd) {
			cmd->cmd_complete(cmd, mgmt_status(status));
			mgmt_pending_remove(cmd);
		} else {
			BT_DBG("Unable to find a pending command");
		}
	}
}
2901 
/* Handle MGMT_OP_PAIR_DEVICE: initiate pairing (dedicated bonding) with
 * a remote BR/EDR or LE device. The reply is normally deferred to the
 * pairing callbacks; fixed here: hci_conn_params_add() can return NULL
 * on allocation failure, which the original dereferenced unchecked.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation of new params failed; bail out
			 * instead of dereferencing a NULL pointer.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_PAIR_DEVICE,
						MGMT_STATUS_NO_RESOURCES,
						&rp, sizeof(rp));
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto the closest mgmt status. */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A callback already installed means someone else is pairing. */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete right away. */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3032 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight Pair Device
 * command, remove any partially created keys and drop the link if it
 * was only established for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing actually in progress. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3089 
/* Common handler for the user pairing response commands (PIN code,
 * user confirmation and passkey replies, positive and negative).
 *
 * @mgmt_op: mgmt opcode being answered (used for replies/pending entry)
 * @hci_op:  HCI opcode to forward the response with (BR/EDR path)
 * @passkey: passkey value, little-endian; only meaningful for
 *           HCI_OP_USER_PASSKEY_REPLY
 *
 * LE responses are resolved directly through SMP; BR/EDR responses are
 * forwarded to the controller with a pending mgmt command tracking the
 * eventual completion. Returns 0 or a negative errno.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection the response belongs to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not via HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3160 
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request.
 * Delegates all the work to the common user_pairing_resp() helper
 * (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	err = user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);

	return err;
}
3172 
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * request. The payload carries nothing beyond the address, so the
 * length is validated here before delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp)) {
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3188 
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation
 * request. Thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	err = user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_USER_CONFIRM_NEG_REPLY,
				HCI_OP_USER_CONFIRM_NEG_REPLY, 0);

	return err;
}
3200 
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by the
 * user. The passkey is forwarded as-is (still little-endian) to
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;
	__le32 passkey = cp->passkey;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, passkey);
}
3212 
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request.
 * Thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	err = user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_USER_PASSKEY_NEG_REPLY,
				HCI_OP_USER_PASSKEY_NEG_REPLY, 0);

	return err;
}
3224 
/* Expire the current advertising instance if it carries any of the
 * given flags, then schedule the next instance so stale data (e.g. an
 * outdated local name or appearance) gets refreshed.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;
	struct hci_request req;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv)
		return;

	/* Nothing to do unless the current instance uses one of the
	 * affected flags.
	 */
	if (!(adv->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (!adv)
		return;

	hci_req_init(&req, hdev);
	if (__hci_req_schedule_adv_instance(&req, adv->instance, true))
		return;

	hci_req_run(&req, NULL);
}
3253 
/* Request callback for the HCI transaction queued by set_local_name().
 *
 * Completes the pending MGMT_OP_SET_LOCAL_NAME command; on success it
 * additionally expires any advertising instance carrying the local
 * name so the new value gets picked up.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cancelled/consumed */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Refresh advertising that includes the local name */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3285 
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short name.
 *
 * If neither name changed a direct success response is sent. While
 * powered off only the stored names are updated and a
 * LOCAL_NAME_CHANGED event is emitted; otherwise an HCI request is
 * queued to push the new name (and EIR/scan response data) to the
 * controller, completed via set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3355 
/* Handle MGMT_OP_SET_APPEARANCE: update the LE appearance value.
 *
 * Only supported on LE-capable controllers. When the value actually
 * changes, any active advertising instance carrying the appearance is
 * expired and an extended info changed event is emitted.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3389 
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHYs. (Note: the response struct name carries an
 * historical "confguration" typo from the public mgmt header.)
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3410 
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3422 
/* Request callback for the HCI_OP_LE_SET_DEFAULT_PHY command queued by
 * set_phy_configuration().
 *
 * Completes the pending MGMT_OP_SET_PHY_CONFIGURATION command and, on
 * success, notifies other mgmt sockets about the changed PHYs.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cancelled/consumed */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Tell everyone else (not the requester) what changed */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3453 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY selection against the supported and
 * configurable sets, translates the BR/EDR PHY bits into an ACL packet
 * type mask stored in hdev->pkt_type, and — when the LE PHY selection
 * changed — queues an HCI LE Set Default PHY command, completed via
 * set_default_phy_complete(). (The "confguration" typo in the command
 * struct name comes from the public mgmt header.)
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Mandatory (non-configurable) PHYs must all remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: complete directly */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY bits to the ACL packet type mask. Note that
	 * the EDR bits are inverted: setting HCI_2DHx/HCI_3DHx in
	 * pkt_type means the packet type "shall NOT be used".
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only BR/EDR was affected and
	 * no HCI command is needed; complete directly.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3608 
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the list of blocked keys.
 *
 * The existing list is cleared and repopulated from the command
 * payload. On allocation failure the keys added so far are kept and
 * MGMT_STATUS_NO_RESOURCES is returned in the command response.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* struct_size() guards the header + array size computation
	 * against overflow.
	 */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Use the CPU-order key_count here; iterating against the raw
	 * little-endian keys->key_count would read the wrong bound on
	 * big-endian systems.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
3657 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle wideband speech support.
 *
 * Only available when the controller declares the wideband speech
 * quirk, and the value cannot diverge from the current state while the
 * controller is powered on.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the requested state must match the current one */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3713 
/* Handle MGMT_OP_READ_SECURITY_INFO.
 *
 * Builds a TLV list (EIR format) describing the controller's security
 * capabilities: a flags octet plus, where available, the maximum
 * encryption key sizes for BR/EDR and LE. buf[16] must be large enough
 * for the fixed response header plus the three TLVs appended below.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	/* LE maximum encryption key size is fixed by SMP */
	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3762 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* Experimental feature UUIDs are stored little-endian, i.e. in reverse
 * byte order compared to the string form given above each table.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3782 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO.
 *
 * Lists the experimental features and their current flags. Called with
 * hdev == NULL for the non-controller (global) index, in which case
 * only the debug feature (if built in) is reported. Each feature entry
 * is 20 bytes (16-byte UUID + 32-bit flags); buf[62] holds the 2-byte
 * count plus up to 3 such entries.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0): controller supports simultaneous central +
		 * peripheral LE roles.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0): enabled, BIT(1): toggling changes settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3841 
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL privacy (RPA resolution)
 * experimental feature. BIT(1) is always reported; BIT(0) reflects
 * whether the feature is currently enabled.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? (BIT(0) | BIT(1)) : BIT(1);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3856 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the debug experimental feature
 * (global index, hence hdev == NULL). BIT(0) reflects the new state.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3871 
/* Handle MGMT_OP_SET_EXP_FEATURE.
 *
 * Three cases, dispatched by the UUID in the command:
 *  - all-zero UUID: disable every experimental feature;
 *  - debug_uuid (if built in): toggle debug output, global index only;
 *  - rpa_resolution_uuid: toggle LL privacy, controller index only and
 *    only while powered down.
 * Any other UUID is rejected with MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* All-zero UUID: disable all experimental features */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4022 
/* Bitmask with all device flags currently supported by mgmt */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4024 
/* Handle MGMT_OP_GET_DEVICE_FLAGS.
 *
 * Reports the supported and current flags for a device, read from the
 * accept list entry (BR/EDR) or the connection parameters (LE). A
 * device that is not known yields MGMT_STATUS_INVALID_PARAMS.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4074 
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED with the device address and the
 * new flag values; sk is forwarded to mgmt_event() (presumably the socket
 * to skip when broadcasting).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	ev.addr.type = bdaddr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.current_flags = cpu_to_le32(current_flags);
	ev.supported_flags = cpu_to_le32(supported_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4088 
/* Handler for MGMT_OP_SET_DEVICE_FLAGS: store the requested flags for a
 * BR/EDR accept-list entry or an LE connection-parameter entry and, on
 * success, broadcast DEVICE_FLAGS_CHANGED to the other sockets.
 *
 * Fixes vs the previous version:
 *  - the early "bad flag" bailout used to jump past hci_dev_lock()
 *    straight to a label that called hci_dev_unlock(), unbalancing the
 *    device lock; the done label now sits after the unlock.
 *  - current_flags is already converted to host order at declaration, so
 *    the debug print no longer runs it through __le32_to_cpu() a second
 *    time (which would corrupt the value on big-endian).
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* Reject any flag outside the supported mask before taking the lock */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4146 
/* Announce a newly registered advertisement monitor handle via
 * MGMT_EV_ADV_MONITOR_ADDED; sk is forwarded to mgmt_event() (presumably
 * the socket to exclude from the broadcast).
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4156 
/* Announce removal of an advertisement monitor handle via
 * MGMT_EV_ADV_MONITOR_REMOVED.
 *
 * Fix: the event payload was declared as struct mgmt_ev_adv_monitor_added
 * even though this emits the REMOVED event. The two structures currently
 * have the same single __le16 member, so the wire format was unaffected,
 * but the declaration now matches the event so the code cannot silently
 * break if the structures ever diverge.
 */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
4166 
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported and
 * enabled monitor feature masks, the stack limits, and the handles of all
 * currently registered advertisement monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* OR-pattern monitoring is only offered when the controller's MSFT
	 * extension advertises LE advertisement monitor support.
	 */
	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot all registered monitor handles under the lock. NOTE(review):
	 * assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries (presumably enforced by
	 * hci_add_adv_monitor()) — otherwise handles[] would overflow; confirm.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	/* Reply is variable-length: fixed header plus one u16 per handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4215 
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the supplied
 * pattern list, build an adv_monitor object and hand it to
 * hci_add_adv_monitor(). Replies with the assigned monitor handle on
 * success.
 *
 * Fix: the command length was only checked to exceed the fixed header, so
 * a pattern_count larger than what the payload actually carried made the
 * parsing loop read cp->patterns[] past the received data (out-of-bounds
 * read of kernel memory). The payload length must now match pattern_count
 * exactly.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
	__u8 cp_ofst = 0, cp_len = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	/* The payload must contain exactly pattern_count pattern entries;
	 * without this check the loop below could index cp->patterns[]
	 * beyond the data received from the socket.
	 */
	if (len != sizeof(*cp) + cp->pattern_count * sizeof(cp->patterns[0])) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		/* Cap the number of patterns a single monitor may carry */
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		/* Each pattern must fit entirely inside one AD payload */
		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	/* Defensive: the loop above visits exactly pattern_count entries */
	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			/* Monitor table is full; tell userspace explicitly */
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	/* Only broadcast the event when a monitor was actually registered */
	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
		mgmt_adv_monitor_added(sk, hdev, m->handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	/* m may still be NULL here; hci_free_adv_monitor() is expected to
	 * tolerate that — TODO confirm.
	 */
	hci_free_adv_monitor(m);
	return err;
}
4317 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: drop the monitor with the given
 * handle, broadcasting ADV_MONITOR_REMOVED when one was actually removed.
 * An unknown handle is reported as MGMT_STATUS_INVALID_INDEX.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	unsigned int old_monitor_cnt;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	old_monitor_cnt = hdev->adv_monitors_cnt;

	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	/* Only announce removal if the monitor count really dropped */
	if (hdev->adv_monitors_cnt < old_monitor_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	/* Echo the handle back in wire byte order */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4355 
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_oob_data(). Converts the controller reply into a
 * MGMT_OP_READ_LOCAL_OOB_DATA response for the pending command's socket
 * and always removes the pending command afterwards.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Without a matching pending command there is nobody to answer */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply carries only the P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields off the management reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply carries both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4414 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: queue an HCI request for the
 * local OOB data (extended variant when BR/EDR Secure Connections is
 * enabled). The reply is sent asynchronously from
 * read_local_oob_data_complete(). Requires a powered, SSP-capable
 * controller and no other READ_LOCAL_OOB_DATA command in flight.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding READ_LOCAL_OOB_DATA at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	/* On submission failure drop the pending entry; otherwise it is
	 * consumed by read_local_oob_data_complete().
	 */
	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4465 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received from a remote device. Two command layouts are accepted,
 * distinguished purely by length: the legacy form with only P-192
 * hash/randomizer (BR/EDR only) and the extended form that adds the P-256
 * values. Any other length is rejected as invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, valid for BR/EDR */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4573 
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete the stored OOB data
 * for one BR/EDR address, or all stored OOB data when the address is
 * BDADDR_ANY. Non-BR/EDR address types are rejected.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		/* Wildcard address: wipe every stored entry */
		hci_remote_oob_data_clear(hdev);
	else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					    cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4610 
/* Called when the start-discovery HCI sequence finishes. Completes
 * whichever of the three start-discovery management commands is pending
 * (regular, service or limited) and wakes the suspend machinery if it was
 * waiting for discovery to be unpaused.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Only one of these opcodes can be pending at a time */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4640 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)4641 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4642 				    uint8_t *mgmt_status)
4643 {
4644 	switch (type) {
4645 	case DISCOV_TYPE_LE:
4646 		*mgmt_status = mgmt_le_support(hdev);
4647 		if (*mgmt_status)
4648 			return false;
4649 		break;
4650 	case DISCOV_TYPE_INTERLEAVED:
4651 		*mgmt_status = mgmt_le_support(hdev);
4652 		if (*mgmt_status)
4653 			return false;
4654 		fallthrough;
4655 	case DISCOV_TYPE_BREDR:
4656 		*mgmt_status = mgmt_bredr_support(hdev);
4657 		if (*mgmt_status)
4658 			return false;
4659 		break;
4660 	default:
4661 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4662 		return false;
4663 	}
4664 
4665 	return true;
4666 }
4667 
/* Common implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (op selects which). Validates device
 * state and the discovery type, records the request as pending and kicks
 * off the asynchronous discovery work; the command completes later from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Defer the actual HCI traffic to the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4735 
/* Handler for MGMT_OP_START_DISCOVERY: thin wrapper around
 * start_discovery_internal() with the regular discovery opcode.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4742 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around
 * start_discovery_internal() with the limited discovery opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4750 
/* cmd_complete callback for Start Service Discovery: reply with a single
 * byte of the stored command parameters (presumably the discovery type,
 * the first field of the command — confirm against mgmt.h).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4757 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but
 * with result filtering by RSSI threshold and an optional list of service
 * UUIDs appended to the command. Validates the variable-length UUID list
 * against the command length before copying it into the discovery state.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that the total length fits in u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Defer the actual HCI traffic to the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4858 
/* Called when the stop-discovery HCI sequence finishes. Completes a
 * pending MGMT_OP_STOP_DISCOVERY command (if any) and wakes the suspend
 * machinery if it was waiting for discovery to be paused.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4881 
/* Handler for MGMT_OP_STOP_DISCOVERY: request that an active discovery of
 * the given type be stopped. The command completes asynchronously from
 * mgmt_stop_discovery_complete(). Rejected when discovery is not active
 * or the supplied type does not match the running one.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the discovery that is actually running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Defer the actual HCI traffic to the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4923 
/* Handler for MGMT_OP_CONFIRM_NAME: userspace tells us whether the name
 * of a device found during discovery is already known. Known names take
 * the entry off the name-resolve list; unknown ones are queued for remote
 * name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (!cp->name_known) {
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	} else {
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
4965 
/* Handler for MGMT_OP_BLOCK_DEVICE: put the address on the controller's
 * reject list and broadcast DEVICE_BLOCKED on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_FAILED;
	else
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5001 
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the address from the
 * controller's reject list and broadcast DEVICE_UNBLOCKED on success. An
 * address that was not on the list yields MGMT_STATUS_INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5037 
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID record
 * (source/vendor/product/version) and refresh the extended inquiry
 * response so the new record is advertised. Source values above 0x0002
 * are rejected (only 0x0000-0x0002 are defined by the management API).
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR data asynchronously */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5072 
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5078 
/* Request-completion callback for Set Advertising: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands and, when advertising was just disabled,
 * resume any configured instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance selected: fall back to the first configured */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5152 
/* MGMT_OP_SET_ADVERTISING handler: 0x00 disables, 0x01 enables and
 * 0x02 enables connectable advertising. When no HCI traffic is needed
 * the flags are toggled and a settings response is sent directly;
 * otherwise an HCI request is queued and finished in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused (e.g. for suspend); don't touch it now */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5271 
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while the controller is powered off; BDADDR_ANY
 * clears the address, any other value must be a valid static address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5315 
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval and window.
 * Both must lie within 0x0004-0x4000 and the window cannot exceed the
 * interval. A running background (passive) scan is restarted so the new
 * parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5372 
/* Request-completion callback for Set Fast Connectable: on success
 * update the HCI_FAST_CONNECTABLE flag to match the requested mode and
 * answer the pending command; on failure report the HCI status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* cmd->param holds the mgmt_mode from the original request */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5406 
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle BR/EDR fast connectable
 * mode. Requires BR/EDR enabled and at least Bluetooth 1.2. Powered-off
 * controllers get only a flag change; powered ones get an HCI request
 * finished in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just confirm current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5471 
/* Request-completion callback for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag (it was flipped optimistically before the
 * request was run) and report the error; on success confirm settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5503 
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Disabling while powered is rejected, and re-enabling is
 * refused when a static LE address or Secure Connections is in use
 * (an invalid identity-address configuration would result).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change requested: just confirm current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5615 
/* Request-completion callback for Set Secure Connections: translate
 * the requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and answer the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* cmd->param holds the mgmt_mode from the original request */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5660 
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections
 * (0x00 off, 0x01 on, 0x02 SC-only mode). If the controller is off,
 * not SC capable or BR/EDR is disabled only the host flags change;
 * otherwise Write Secure Connections Host Support is sent and the
 * result is handled in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SC over BR/EDR builds on SSP, so SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already in effect: confirm without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5748 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep them and also enable SSP debug mode on the
 * controller (requires powered + SSP enabled).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Track whether the "keep" setting actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Track whether the "use" setting (mode 0x02) actually changed */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only toggle SSP debug mode when it can take effect */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5795 
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy (0x00 off, 0x01 on,
 * 0x02 limited privacy) and store the IRK. Only allowed while powered
 * off, since the identity resolving setup affects address generation.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA with the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5852 
irk_is_valid(struct mgmt_irk_info * irk)5853 static bool irk_is_valid(struct mgmt_irk_info *irk)
5854 {
5855 	switch (irk->addr.type) {
5856 	case BDADDR_LE_PUBLIC:
5857 		return true;
5858 
5859 	case BDADDR_LE_RANDOM:
5860 		/* Two most significant bits shall be set */
5861 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5862 			return false;
5863 		return true;
5864 	}
5865 
5866 	return false;
5867 }
5868 
/* MGMT_OP_LOAD_IRKS handler: replace the stored Identity Resolving
 * Keys with the supplied list. The whole list is validated before the
 * existing keys are cleared, so an invalid entry leaves state intact.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound such that the parameter block fits in a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5939 
ltk_is_valid(struct mgmt_ltk_info * key)5940 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5941 {
5942 	if (key->master != 0x00 && key->master != 0x01)
5943 		return false;
5944 
5945 	switch (key->addr.type) {
5946 	case BDADDR_LE_PUBLIC:
5947 		return true;
5948 
5949 	case BDADDR_LE_RANDOM:
5950 		/* Two most significant bits shall be set */
5951 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5952 			return false;
5953 		return true;
5954 	}
5955 
5956 	return false;
5957 }
5958 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the stored LTKs with the
 * supplied list. The whole list is validated before the existing keys
 * are cleared; blocked keys, debug keys and unknown types are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound such that the parameter block fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not stored: fall
			 * through to the default skip below.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6054 
/* cmd_complete handler for a pending Get Connection Info command:
 * build the reply from the connection's cached RSSI/TX power values on
 * success (invalid markers on failure), send it and release the
 * connection references held for this pending command.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info of the request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6081 
/* Request-completion callback for the RSSI/TX-power refresh issued by
 * Get Connection Info: recover the connection handle from the last
 * sent command, find the matching pending command and complete it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by the connection it targets */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6135 
/* Get Connection Info (mgmt command handler).
 *
 * Replies immediately with the RSSI/TX power values cached in the
 * hci_conn when they are fresh enough; otherwise queues Read RSSI (and,
 * when still needed, Read TX Power) HCI commands and completes the mgmt
 * command asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding request per connection at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References held for the async request; released in
		 * conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6256 
/* Finish a pending Get Clock Info command.
 *
 * On success the local clock and, when a connection was specified, the
 * piconet clock and accuracy are filled in; otherwise a zeroed reply is
 * sent with the failure status. Drops the connection references taken
 * when the command was queued, if any.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (!status) {
		hdev = hci_dev_get(cmd->index);
		if (hdev) {
			rp.local_clock = cpu_to_le32(hdev->clock);
			hci_dev_put(hdev);
		}

		if (conn) {
			rp.piconet_clock = cpu_to_le32(conn->clock);
			rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		}
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6292 
/* Completion callback for the Read Clock request built in
 * get_clock_info(). Recovers the optional connection from the sent
 * command and completes the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn = NULL;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was requested; look it up by handle.
	 */
	if (cp->which)
		conn = hci_conn_hash_lookup_handle(hdev,
						   __le16_to_cpu(cp->handle));

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
}
6324 
/* Get Clock Info (mgmt command handler, BR/EDR only).
 *
 * Queues a Read Clock for the local clock and, when a peer address was
 * given and is connected, a second Read Clock for that connection's
 * piconet clock. The mgmt command is completed asynchronously via
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: local clock (handle 0, which 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6400 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6401 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6402 {
6403 	struct hci_conn *conn;
6404 
6405 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6406 	if (!conn)
6407 		return false;
6408 
6409 	if (conn->dst_type != type)
6410 		return false;
6411 
6412 	if (conn->state != BT_CONNECTED)
6413 		return false;
6414 
6415 	return true;
6416 }
6417 
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for the given address and
 * set its auto-connect policy, moving the entry onto the matching
 * pend_le_conns/pend_le_reports action list. Returns 0 on success or
 * -EIO when the parameters could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Policy unchanged: nothing to do */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6462 
/* Emit a Device Added mgmt event for the given address and action. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6474 
/* Add Device (mgmt command handler).
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the address is added to the accept list. For LE
 * identity addresses the action selects the auto-connect policy
 * (0x00 report, 0x01 direct, 0x02 always) via hci_conn_params_set().
 * Emits Device Added and Device Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6572 
/* Emit a Device Removed mgmt event for the given address. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = { .addr.type = type };

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6583 
/* Remove Device (mgmt command handler).
 *
 * With a specific address: removes a BR/EDR accept-list entry, or
 * deletes the LE connection parameters for an identity address (entries
 * with DISABLED or EXPLICIT policy are rejected as they were not added
 * through Add Device). With BDADDR_ANY (type 0): clears the whole
 * accept list and all removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were not added via Add Device */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-progress explicit connect */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6712 
/* Load Connection Parameters (mgmt command handler).
 *
 * Replaces the stored LE connection parameters: disabled entries are
 * cleared first, then each supplied parameter set is validated and
 * stored. Invalid entries are logged and skipped rather than failing
 * the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6797 
/* Set External Configuration (mgmt command handler).
 *
 * Toggles the HCI_EXT_CONFIGURED flag on an unpowered controller that
 * declares HCI_QUIRK_EXTERNAL_CONFIG. When the change flips the
 * configured state, the controller index is re-announced: either
 * transitioning out of the unconfigured state (power-on via the config
 * sequence) or back into raw/unconfigured mode.
 *
 * Fix: the MGMT_STATUS_INVALID_PARAMS continuation line was misaligned
 * relative to the file's convention of aligning continuations with the
 * opening parenthesis.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flag change flipped the overall configured state: tear the
	 * index down and bring it back up in the new mode.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6853 
/* Set Public Address (mgmt command handler).
 *
 * Stores the public address to be programmed via hdev->set_bdaddr.
 * Only valid while the controller is powered off, with a non-zero
 * address, and when the driver provides a set_bdaddr callback. If the
 * address makes an unconfigured controller configured, the index is
 * re-announced and the power-on config sequence is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may complete the configuration */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6905 
/* Completion callback for the Read Local OOB (Extended) Data request
 * built in read_local_ssp_oob_req(). Packs the returned hash/randomizer
 * values (P-192 and/or P-256, depending on the command used and the
 * SC-only setting) into EIR form, completes the pending mgmt command
 * and emits a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field + two 18-byte EIR entries */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 and, unless SC-only, P-192 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other sockets listening for OOB data updates, but not
	 * the requester itself.
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7016 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7017 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7018 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7019 {
7020 	struct mgmt_pending_cmd *cmd;
7021 	struct hci_request req;
7022 	int err;
7023 
7024 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7025 			       cp, sizeof(*cp));
7026 	if (!cmd)
7027 		return -ENOMEM;
7028 
7029 	hci_req_init(&req, hdev);
7030 
7031 	if (bredr_sc_enabled(hdev))
7032 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7033 	else
7034 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7035 
7036 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7037 	if (err < 0) {
7038 		mgmt_pending_remove(cmd);
7039 		return err;
7040 	}
7041 
7042 	return 0;
7043 }
7044 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds EIR-formatted local out-of-band pairing data for either the
 * BR/EDR transport or the LE transports (public + random), selected by
 * cp->type. On success the requesting socket is also subscribed to
 * future Local OOB Data Updated events.
 *
 * Returns 0 or a negative errno; protocol-level failures are reported
 * through the mgmt status carried in the command-complete response.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Work out the worst-case EIR payload size up front so the reply
	 * buffer can be allocated in a single allocation.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	/* Any failure detected so far is reported with an empty payload */
	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data must be read from the controller;
			 * on success the response is delivered
			 * asynchronously, so nothing more to do here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type marker: 0x01 for the
		 * static random address, 0x00 for the public address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE Role value: 0x02 while advertising, 0x01 otherwise
		 * (semantics per the Core Spec LE Role AD type).
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* From now on this socket wants to hear about OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Tell other subscribed sockets about the fresh OOB data */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7200 
get_supported_adv_flags(struct hci_dev * hdev)7201 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7202 {
7203 	u32 flags = 0;
7204 
7205 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7206 	flags |= MGMT_ADV_FLAG_DISCOV;
7207 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7208 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7209 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7210 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7211 
7212 	/* In extended adv TX_POWER returned from Set Adv Param
7213 	 * will be always valid.
7214 	 */
7215 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7216 	    ext_adv_capable(hdev))
7217 		flags |= MGMT_ADV_FLAG_TX_POWER;
7218 
7219 	if (ext_adv_capable(hdev)) {
7220 		flags |= MGMT_ADV_FLAG_SEC_1M;
7221 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7222 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7223 
7224 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7225 			flags |= MGMT_ADV_FLAG_SEC_2M;
7226 
7227 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7228 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7229 	}
7230 
7231 	return flags;
7232 }
7233 
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Reports the supported advertising flags, data size limits, the
 * maximum number of instances and the identifiers of the currently
 * configured advertising instances.
 *
 * Returns 0 or a negative errno.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * NOTE: the status must be sent with the opcode of the command
	 * being answered (MGMT_OP_READ_ADV_FEATURES); the previous code
	 * replied with MGMT_OP_SET_ADVERTISING, which breaks the mgmt
	 * request/response matching in userspace.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per configured instance for its identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7289 
calculate_name_len(struct hci_dev * hdev)7290 static u8 calculate_name_len(struct hci_dev *hdev)
7291 {
7292 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7293 
7294 	return append_local_name(hdev, buf, 0);
7295 }
7296 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7297 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7298 			   bool is_adv_data)
7299 {
7300 	u8 max_len = HCI_MAX_AD_LENGTH;
7301 
7302 	if (is_adv_data) {
7303 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7304 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7305 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7306 			max_len -= 3;
7307 
7308 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7309 			max_len -= 3;
7310 	} else {
7311 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7312 			max_len -= calculate_name_len(hdev);
7313 
7314 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7315 			max_len -= 4;
7316 	}
7317 
7318 	return max_len;
7319 }
7320 
flags_managed(u32 adv_flags)7321 static bool flags_managed(u32 adv_flags)
7322 {
7323 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7324 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7325 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7326 }
7327 
tx_power_managed(u32 adv_flags)7328 static bool tx_power_managed(u32 adv_flags)
7329 {
7330 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7331 }
7332 
name_managed(u32 adv_flags)7333 static bool name_managed(u32 adv_flags)
7334 {
7335 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7336 }
7337 
appearance_managed(u32 adv_flags)7338 static bool appearance_managed(u32 adv_flags)
7339 {
7340 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7341 }
7342 
/* Validate user-supplied advertising / scan response TLV data.
 *
 * Checks that the total length fits within the space left after
 * kernel-managed fields, that every length-type-value field fits
 * inside the buffer, and that no field duplicates one the kernel
 * manages itself (Flags, TX power, local name, appearance).
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* Reject a field whose declared length would exceed the
		 * total data length BEFORE inspecting the type byte at
		 * data[i + 1]. Previously this check came last, so a
		 * malformed final field (e.g. a non-zero length byte in
		 * the last position) caused a read one byte past the
		 * region validated against len.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7387 
/* Completion callback for the HCI request queued by add_advertising().
 *
 * On success, instances still marked pending simply have the marker
 * cleared. On failure, every pending instance is removed again (with
 * the advertising timeout cancelled if the current instance is among
 * them) and Advertising Removed events are emitted. Finally the
 * outstanding mgmt command, if any, is answered with the mapped
 * status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before tearing the instance down */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	/* The command may already have been answered (e.g. timed out) */
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7439 
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the requested instance, flags and TLV payloads, stores the
 * instance and, when advertising can actually be started right now,
 * queues the HCI request; in that case the reply is sent from
 * add_advertising_complete(). Otherwise the command completes
 * immediately with success.
 *
 * Returns 0 or a negative errno.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Instance identifiers are 1-based */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must be exactly adv data followed
	 * by scan response data.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than
	 * one secondary-PHY bit is set, i.e. the bits are not mutually
	 * exclusive as required.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7592 
/* Completion callback for the HCI request queued by remove_advertising().
 *
 * Answers the outstanding Remove Advertising mgmt command. The command
 * always completes with success here: the instance has already been
 * removed, a non-zero status only means disabling advertising on the
 * controller failed.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7622 
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes the advertising instance in cp->instance (0 removes all
 * instances). When HCI commands are required to disable advertising,
 * a pending command is queued and the reply is delivered from
 * remove_advertising_complete(); otherwise the command completes
 * immediately.
 *
 * Returns 0 or a negative errno.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * NOTE: the status must carry the opcode of the command being
	 * answered (MGMT_OP_REMOVE_ADVERTISING); the previous code
	 * replied with MGMT_OP_SET_ADVERTISING, which breaks the mgmt
	 * request/response matching in userspace.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7708 
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO.
 *
 * Reports how much advertising data and scan response data space is
 * left for the given instance once the kernel-managed fields implied
 * by the requested flags are accounted for.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance identifiers are 1-based */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
7747 
/* Command dispatch table, indexed by MGMT opcode: the position of each
 * entry must match its MGMT_OP_* value (compare mgmt_commands[] above),
 * so new entries may only be appended, never reordered. Each entry
 * gives the handler, the minimum parameter size, and optional flags
 * (HCI_MGMT_VAR_LEN, HCI_MGMT_NO_HDEV, HCI_MGMT_UNTRUSTED, ...).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7868 
/* Notify mgmt listeners that a controller index was registered.
 *
 * Primary controllers get the legacy Index Added (or Unconf Index
 * Added) event; all supported controller types additionally get the
 * Extended Index Added event, which carries a type and bus value so
 * listeners can filter.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7900 
/* Notify mgmt listeners that a controller index is going away.
 *
 * For primary controllers every still-pending mgmt command is first
 * failed with Invalid Index, then the legacy Index Removed (or Unconf
 * Index Removed) event is sent. All supported controller types also
 * get the Extended Index Removed event.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every outstanding command for this index */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7935 
7936 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)7937 static void restart_le_actions(struct hci_dev *hdev)
7938 {
7939 	struct hci_conn_params *p;
7940 
7941 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7942 		/* Needed for AUTO_OFF case where might not "really"
7943 		 * have been powered off.
7944 		 */
7945 		list_del_init(&p->action);
7946 
7947 		switch (p->auto_connect) {
7948 		case HCI_AUTO_CONN_DIRECT:
7949 		case HCI_AUTO_CONN_ALWAYS:
7950 			list_add(&p->action, &hdev->pend_le_conns);
7951 			break;
7952 		case HCI_AUTO_CONN_REPORT:
7953 			list_add(&p->action, &hdev->pend_le_reports);
7954 			break;
7955 		default:
7956 			break;
7957 		}
7958 	}
7959 }
7960 
/* Called when a power-on attempt has finished; err is 0 on success.
 *
 * On success the stored LE connection parameters are re-armed and
 * background scanning updated. Any pending Set Powered commands are
 * answered and a New Settings event is emitted.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* Answer all pending Set Powered commands; match.sk collects a
	 * socket reference to skip when broadcasting New Settings.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7983 
/* Tear down mgmt state when the controller powers off.
 *
 * Answers pending Set Powered commands, fails all other outstanding
 * commands with an appropriate status, announces a zero class of
 * device if one was set, and emits New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class of device reverting to all-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8017 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8018 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8019 {
8020 	struct mgmt_pending_cmd *cmd;
8021 	u8 status;
8022 
8023 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8024 	if (!cmd)
8025 		return;
8026 
8027 	if (err == -ERFKILL)
8028 		status = MGMT_STATUS_RFKILLED;
8029 	else
8030 		status = MGMT_STATUS_FAILED;
8031 
8032 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8033 
8034 	mgmt_pending_remove(cmd);
8035 }
8036 
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Clear the whole event, including any padding, before it is
	 * handed to userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8053 
mgmt_ltk_type(struct smp_ltk * ltk)8054 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8055 {
8056 	switch (ltk->type) {
8057 	case SMP_LTK:
8058 	case SMP_LTK_SLAVE:
8059 		if (ltk->authenticated)
8060 			return MGMT_LTK_AUTHENTICATED;
8061 		return MGMT_LTK_UNAUTHENTICATED;
8062 	case SMP_LTK_P256:
8063 		if (ltk->authenticated)
8064 			return MGMT_LTK_P256_AUTH;
8065 		return MGMT_LTK_P256_UNAUTH;
8066 	case SMP_LTK_P256_DEBUG:
8067 		return MGMT_LTK_P256_DEBUG;
8068 	}
8069 
8070 	return MGMT_LTK_UNAUTHENTICATED;
8071 }
8072 
/* Emit a New Long Term Key event for a freshly distributed LTK.
 *
 * The store hint is suppressed for keys bound to (non-)resolvable
 * random addresses, since those addresses will not survive the next
 * connection; only identity addresses get @persistent through.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Legacy master keys are flagged so userspace can tell roles apart */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	/* NOTE(review): assumes key->enc_size <= sizeof(ev.key.val);
	 * presumably guaranteed by SMP negotiation — confirm at callers.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8115 
/* Emit a New IRK event carrying the remote identity resolving key.
 *
 * The event pairs the device's current RPA with its identity address
 * so userspace can correlate the two; @persistent becomes store_hint.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8131 
/* Emit a New CSRK event for a distributed signature resolving key.
 *
 * Keys bound to resolvable/non-resolvable random addresses are not
 * worth storing — the address changes on every cycle — so only
 * identity addresses (public, or static random with the two top
 * address bits set) propagate @persistent as the store hint.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;
	bool id_addr;

	memset(&ev, 0, sizeof(ev));

	id_addr = csrk->bdaddr_type != ADDR_LE_DEV_RANDOM ||
		  (csrk->bdaddr.b[5] & 0xc0) == 0xc0;
	ev.store_hint = id_addr ? persistent : 0x00;

	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8161 
/* Emit a New Connection Parameter event for an LE peer.
 *
 * Parameters are only meaningful against an identity address, so
 * updates for purely random (resolvable/non-resolvable) addresses
 * are silently dropped. Interval/latency/timeout are converted to
 * little endian as required by the mgmt wire format.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8182 
/* Emit a Device Connected event for @conn.
 *
 * The event carries either the cached LE advertising data or, for
 * BR/EDR, an EIR blob assembled from the remote @name and the
 * remote class of device. Event size is variable: header plus the
 * EIR bytes actually written.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): no explicit bounds check against buf; assumes
	 * le_adv_data_len and name_len stay within HCI limits small
	 * enough for 512 bytes — confirm at callers.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* LE: forward the cached advertising data verbatim */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: build EIR from name and class of device */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append CoD when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8219 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)8220 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8221 {
8222 	struct sock **sk = data;
8223 
8224 	cmd->cmd_complete(cmd, 0);
8225 
8226 	*sk = cmd->sk;
8227 	sock_hold(*sk);
8228 
8229 	mgmt_pending_remove(cmd);
8230 }
8231 
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command once its underlying disconnect has run. Sends the Device
 * Unpaired event to everyone except the requester (who gets the
 * command response instead), then completes and frees the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8242 
mgmt_powering_down(struct hci_dev * hdev)8243 bool mgmt_powering_down(struct hci_dev *hdev)
8244 {
8245 	struct mgmt_pending_cmd *cmd;
8246 	struct mgmt_mode *cp;
8247 
8248 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8249 	if (!cmd)
8250 		return false;
8251 
8252 	cp = cmd->param;
8253 	if (!cp->val)
8254 		return true;
8255 
8256 	return false;
8257 }
8258 
/* Handle a link disconnect from the mgmt perspective.
 *
 * Fast-tracks a pending power-off once the last connection drops,
 * completes any pending Disconnect command, emits the Device
 * Disconnected event (skipping the requester's socket) and finally
 * answers pending Unpair Device commands that were waiting on this
 * disconnect.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if mgmt never reported this link as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes the Disconnect command and hands us its socket */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	/* Skip the requester's socket — it got the command response */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8298 
/* Handle a failed disconnect attempt.
 *
 * Pending Unpair Device commands are completed regardless (their
 * unpairing already happened), and a pending Disconnect command is
 * only completed with the failure status when it targeted exactly
 * this address and address type.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_disconnect *cp;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Leave the command pending unless it matches this very peer */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8324 
/* Emit a Connect Failed event and, if this failure removes the last
 * connection during a pending power-off, fast-track the power-off
 * work instead of waiting for its timer.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The failed connection still sits in hci_conn_hash, so a
	 * count of 1 (not 0) means it was the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	ev.status = mgmt_status(status);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8344 
/* Emit a PIN Code Request event so userspace can supply a PIN.
 * @secure indicates a 16-digit secure PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	/* PIN pairing only exists on BR/EDR links */
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
8355 
/* Complete a pending PIN Code Reply command with the controller's
 * @status (translated to a mgmt status code).
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8368 
/* Complete a pending PIN Code Negative Reply command with the
 * controller's @status (translated to a mgmt status code).
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8381 
/* Emit a User Confirmation Request event carrying the numeric
 * comparison @value. @confirm_hint tells userspace whether it only
 * needs a yes/no confirmation rather than showing the value.
 * Returns the result of queueing the mgmt event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8398 
/* Emit a User Passkey Request event asking userspace to collect a
 * passkey from the user. Returns the result of queueing the event.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8412 
/* Complete the pending user pairing response command identified by
 * @opcode with the translated controller @status.
 *
 * Returns 0 on success or -ENOENT when no such command is pending.
 * The bdaddr/link/addr-type arguments are kept for the wrappers'
 * uniform signature; lookup is by opcode only.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8428 
/* Complete a pending User Confirmation Reply; thin wrapper around
 * user_pairing_resp_complete(). Returns 0 or -ENOENT.
 */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8435 
/* Complete a pending User Confirmation Negative Reply; thin wrapper
 * around user_pairing_resp_complete(). Returns 0 or -ENOENT.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8443 
/* Complete a pending User Passkey Reply; thin wrapper around
 * user_pairing_resp_complete(). Returns 0 or -ENOENT.
 */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8450 
/* Complete a pending User Passkey Negative Reply; thin wrapper
 * around user_pairing_resp_complete(). Returns 0 or -ENOENT.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8458 
/* Emit a Passkey Notify event so userspace can display @passkey to
 * the user; @entered counts the digits typed so far on the remote
 * side. Returns the result of queueing the event.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8474 
/* Report an authentication failure on @conn.
 *
 * Emits the Auth Failed event to everyone except the socket that
 * issued the matching Pair Device command (if any); that command is
 * completed afterwards with the same translated status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);
	struct mgmt_ev_auth_failed ev;

	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.status = status;

	/* Event first (skipping the requester), command response second */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8495 
/* Handle completion of a Write Authentication Enable HCI command.
 *
 * On failure, all pending Set Link Security commands are failed with
 * the translated status. On success the HCI_LINK_SECURITY dev flag is
 * synced to the controller's HCI_AUTH state, pending commands get the
 * new settings, and a New Settings event goes out if the flag really
 * flipped.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_set/clear so 'changed' is true only on a real flip */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp held a reference on the first requester's socket */
	if (match.sk)
		sock_put(match.sk);
}
8522 
clear_eir(struct hci_request * req)8523 static void clear_eir(struct hci_request *req)
8524 {
8525 	struct hci_dev *hdev = req->hdev;
8526 	struct hci_cp_write_eir cp;
8527 
8528 	if (!lmp_ext_inq_capable(hdev))
8529 		return;
8530 
8531 	memset(hdev->eir, 0, sizeof(hdev->eir));
8532 
8533 	memset(&cp, 0, sizeof(cp));
8534 
8535 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8536 }
8537 
/* Handle completion of a Write Simple Pairing Mode HCI command.
 *
 * On failure, the optimistic HCI_SSP_ENABLED flag (set when the
 * command was issued) is rolled back — which also drops HS — and all
 * pending Set SSP commands are failed. On success, the SSP/HS flags
 * are synced, pending commands receive the new settings, and the EIR
 * is either refreshed (SSP on) or cleared (SSP off) via a follow-up
 * HCI request.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the flag that was set optimistically on enable */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP implies disabling HS; 'changed' must be
		 * true if either flag actually flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Sync the EIR with the new SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8590 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8591 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8592 {
8593 	struct cmd_lookup *match = data;
8594 
8595 	if (match->sk == NULL) {
8596 		match->sk = cmd->sk;
8597 		sock_hold(match->sk);
8598 	}
8599 }
8600 
/* Handle completion of a class-of-device update.
 *
 * Picks the socket of the first pending Set Device Class / Add UUID /
 * Remove UUID command (for reference accounting) and, on success,
 * broadcasts the new Class of Device plus an extended-info change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (status)
		goto done;

	mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
	ext_info_changed(hdev, NULL);

done:
	if (match.sk)
		sock_put(match.sk);
}
8619 
/* Handle completion of a local name update.
 *
 * When no Set Local Name command is pending the change came from HCI
 * directly, so the cached dev_name is updated here; in that case the
 * event is suppressed entirely if it happened as part of power-on.
 * Otherwise the Local Name Changed event is sent, skipping the
 * requester's socket.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change originated from HCI, not mgmt: sync cache */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8647 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])8648 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8649 {
8650 	int i;
8651 
8652 	for (i = 0; i < uuid_count; i++) {
8653 		if (!memcmp(uuid, uuids[i], 16))
8654 			return true;
8655 	}
8656 
8657 	return false;
8658 }
8659 
/* Return true if the EIR/advertising blob @eir (of @eir_len bytes)
 * contains any UUID from the @uuids filter table.
 *
 * 16- and 32-bit UUIDs are expanded onto the Bluetooth base UUID
 * before comparison so all matching is done on full 128-bit values.
 * Each EIR field is [len][type][data...]; len covers type + data.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Field claims more bytes than remain: stop parsing */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs, data starts at eir[2] */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8714 
restart_le_scan(struct hci_dev * hdev)8715 static void restart_le_scan(struct hci_dev *hdev)
8716 {
8717 	/* If controller is not scanning we are done. */
8718 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8719 		return;
8720 
8721 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8722 		       hdev->discovery.scan_start +
8723 		       hdev->discovery.scan_duration))
8724 		return;
8725 
8726 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8727 			   DISCOV_LE_RESTART_DELAY);
8728 }
8729 
/* Decide whether a discovery result passes the active service
 * discovery filter (RSSI threshold and/or UUID list). Returns false
 * when the result should be dropped. May schedule an LE scan restart
 * as a side effect when strict duplicate filtering is in use.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		/* A match in either the EIR/adv data or the scan response
		 * is enough to let the result through.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8774 
/* Emit a Device Found event for a discovery/scan result, after
 * applying the active discovery filters (service filter, limited
 * discoverable filter). EIR, class of device and scan response data
 * are concatenated into the event's variable-length eir field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 13 of the CoD marks limited mode */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD field instead */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the CoD as an EIR field unless one is already present */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8859 
/* Report a resolved remote name as a Device Found event whose only
 * EIR field is the complete name. Used after a remote name request
 * during discovery.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	/* Header + name field (2 bytes of EIR framing + name bytes) */
	char buf[sizeof(struct mgmt_ev_device_found) +
		 HCI_MAX_NAME_LENGTH + 2];
	struct mgmt_ev_device_found *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len,
		    NULL);
}
8882 
/* Emit a Discovering event reflecting the current discovery state
 * (@discovering non-zero when discovery is active) and type.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.discovering = discovering;
	ev.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8895 
/* Emit a Controller Suspend event carrying the suspend @state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = { .suspend_state = state };

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
8903 
/* Emit a Controller Resume event with the wake @reason and, when the
 * wake was triggered by a remote device, its address. A NULL @bdaddr
 * yields an all-zero address in the event.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
8919 
/* Registration record for the mgmt control channel: routes
 * HCI_CHANNEL_CONTROL traffic to the mgmt_handlers table and runs
 * mgmt_init_hdev() for each new controller.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8926 
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8931 
/* Unregister the mgmt control channel on subsystem teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8936