1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Management opcodes accepted from trusted control-channel sockets.
 * Untrusted sockets are restricted to mgmt_untrusted_commands below.
 * This table is what MGMT_OP_READ_COMMANDS reports back to userspace.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* Management events that trusted control-channel sockets may receive.
 * This table is what MGMT_OP_READ_COMMANDS reports as the event list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Subset of mgmt_commands permitted for untrusted sockets: read-only
 * information commands with no ability to change controller state.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of mgmt_events delivered to untrusted sockets: index and
 * configuration change notifications only, no security material.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entry POSITION is
 * significant: entry N must correspond to HCI status 0xNN as defined
 * in the Bluetooth Core specification. Do not reorder or remove
 * entries; mgmt_status() bounds-checks the index before lookup.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel to sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Broadcast an event on the control channel restricted to sockets
 * matching @flag, optionally skipping the originating socket @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Broadcast an event on the control channel to trusted sockets only,
 * optionally skipping the originating socket @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Broadcast a pre-built event skb on the control channel to trusted
 * sockets, optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
/* Convert a MGMT LE address type to the HCI LE address type. Anything
 * other than public is treated as a random address.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
357 
mgmt_fill_version_info(void * ver)358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version and revision. Works without a controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);
	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * commands and events. Trusted sockets see the full tables, untrusted
 * sockets only the read-only subsets.
 *
 * Improvement over the previous version: the trusted/untrusted flag is
 * tested once and the source tables selected through pointers, instead
 * of duplicating the branch and the two copy loops.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure, otherwise whatever mgmt_cmd_complete() returns).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcodes array carries the commands first, then the events,
	 * each as unaligned little-endian 16-bit values.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured primary controllers.
 *
 * Runs under hci_dev_list_lock (read side), hence the GFP_ATOMIC
 * allocation. The first pass over the list computes an upper bound for
 * the reply size; the second pass applies additional filtering
 * (SETUP/CONFIG/USER_CHANNEL state, raw-only quirk), so the final
 * count may be smaller and rp_len is recomputed before sending.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply (upper bound). */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that pass full filtering. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the actual number of entries. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: reply with the ids of all
 * UNCONFIGURED primary controllers. Mirror image of read_index_list()
 * with the HCI_UNCONFIGURED test inverted: same locking (allocation
 * under hci_dev_list_lock must be GFP_ATOMIC) and the same
 * two-pass upper-bound/fill scheme, so rp_len is recomputed after the
 * second, stricter pass.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply (upper bound). */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that pass full filtering. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the actual number of entries. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all controllers
 * (primary and AMP), each entry carrying a type (0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP) and the bus.
 *
 * Same two-pass scheme and GFP_ATOMIC-under-read_lock rules as
 * read_index_list(). As a side effect, the first use of this command
 * switches the socket to extended index events only.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply (upper bound). */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries that pass full filtering. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624 
is_configured(struct hci_dev * hdev)625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
get_missing_options(struct hci_dev * hdev)639 static __le32 get_missing_options(struct hci_dev *hdev)
640 {
641 	u32 options = 0;
642 
643 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
646 
647 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
650 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 
652 	return cpu_to_le32(options);
653 }
654 
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662 
/* Complete @opcode with the currently missing configuration options as
 * the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id and
 * the supported/missing configuration option bitmasks. The device is
 * locked while the snapshot is taken so the values are consistent.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	/* External configuration is supported when the quirk is set;
	 * a settable public address requires a set_bdaddr driver hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698 
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits. The nesting mirrors
 * the feature dependencies: EDR multi-slot bits only matter when the
 * corresponding EDR data rate is available at all.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR implies 2M EDR, hence nested here. */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750 
/* Build the bitmask of PHYs currently selected. For BR/EDR, slot
 * selection comes from hdev->pkt_type; note that EDR packet-type bits
 * (HCI_2DHx/HCI_3DHx) are "shall not be used" bits, so a CLEAR bit
 * means the packet type is selected — hence the negated tests below.
 * For LE, selection comes from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are inverted: clear == enabled. */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813 
get_configurable_phys(struct hci_dev * hdev)814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
/* Build the bitmask of settings this controller can support at all,
 * derived from its capabilities (what MGMT_OP_READ_INFO reports as
 * "supported settings"). Contrast with get_current_settings() which
 * reports what is currently enabled.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced page scan (1.2+). */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS. */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible with either external config support
	 * or a driver hook to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
872 
/* Build the bitmask of settings currently in effect, derived from the
 * device flags. Counterpart of get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	return settings;
}
949 
/* Look up a pending management command for @opcode on the control
 * channel of @hdev; returns NULL if none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
954 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
956 {
957 	struct mgmt_pending_cmd *cmd;
958 
959 	/* If there's a pending mgmt command the flags will not yet have
960 	 * their final values, so check for this first.
961 	 */
962 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
963 	if (cmd) {
964 		struct mgmt_mode *cp = cmd->param;
965 		if (cp->val == 0x01)
966 			return LE_AD_GENERAL;
967 		else if (cp->val == 0x02)
968 			return LE_AD_LIMITED;
969 	} else {
970 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971 			return LE_AD_LIMITED;
972 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973 			return LE_AD_GENERAL;
974 	}
975 
976 	return 0;
977 }
978 
mgmt_get_connectable(struct hci_dev * hdev)979 bool mgmt_get_connectable(struct hci_dev *hdev)
980 {
981 	struct mgmt_pending_cmd *cmd;
982 
983 	/* If there's a pending mgmt command the flag will not yet have
984 	 * it's final value, so check for this first.
985 	 */
986 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
987 	if (cmd) {
988 		struct mgmt_mode *cp = cmd->param;
989 
990 		return cp->val;
991 	}
992 
993 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
994 }
995 
/* Sync work: refresh the EIR data and class of device after the
 * service cache timeout has expired. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1003 
/* Delayed-work handler for hdev->service_cache: when the service cache
 * flag is still set, clear it and queue a sync update of EIR/class.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* test-and-clear so only one expiry triggers the update */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1014 
rpa_expired_sync(struct hci_dev * hdev,void * data)1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1016 {
1017 	/* The generation of a new RPA and programming it into the
1018 	 * controller happens in the hci_req_enable_advertising()
1019 	 * function.
1020 	 */
1021 	if (ext_adv_capable(hdev))
1022 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1023 	else
1024 		return hci_enable_advertising_sync(hdev);
1025 }
1026 
/* Delayed-work handler for hdev->rpa_expired: mark the resolvable
 * private address as expired and, if advertising is active, queue the
 * sync work that restarts advertising with a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to restart when advertising is not enabled; the
	 * expired flag alone is enough for the next enable.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1041 
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * fired, so drop both discoverable flags, reset the timeout and push
 * the new state to the controller and mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	/* Notify mgmt sockets of the settings change */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1066 
1067 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1068 
/* Finish a mesh TX entry: optionally emit the Mesh Packet Complete
 * event for its handle (skipped when @silent), then free the entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before mgmt_mesh_remove() frees mesh_tx */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1080 
/* Sync work: stop the current mesh transmission by clearing the
 * sending flag and disabling advertising, then complete the next
 * queued mesh TX entry (if any). Always returns 0.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1094 
1095 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1096 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync: kick off the next
 * queued mesh transmission, or complete it with failure if the sync
 * work cannot be queued. The incoming @err is ignored and reused.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1112 
/* Delayed-work handler for hdev->mesh_send_done: when a mesh send is
 * still in flight, queue the sync work that finishes it and chains to
 * the next pending transmission via mesh_next().
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1123 
/* One-time per-device mgmt initialization, performed the first time a
 * mgmt socket touches @hdev: set up the delayed work items and switch
 * the device into mgmt-controlled mode. Idempotent via HCI_MGMT.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1145 
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, settings, class and names under hdev->lock and return them
 * in a mgmt_rp_read_info response.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1175 
/* Build the EIR payload for extended controller info events/responses:
 * class of device (BR/EDR only), appearance (LE only) and both device
 * names. Returns the number of bytes written to @eir. The caller must
 * provide a buffer large enough for all appended fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1199 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class/appearance/names packed as EIR data. Also switches this socket
 * over to the extended info event and away from the legacy class/name
 * events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed header plus variable-length EIR data share one buffer */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1239 
/* Emit MGMT_EV_EXT_INFO_CHANGED with freshly built EIR data to all
 * sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Fixed header plus variable-length EIR data share one buffer */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1255 
/* Complete @opcode towards @sk with the current settings bitmask. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1263 
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance, skipping @sk. */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1272 
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance, skipping @sk. */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1282 
cancel_adv_timeout(struct hci_dev * hdev)1283 static void cancel_adv_timeout(struct hci_dev *hdev)
1284 {
1285 	if (hdev->adv_instance_timeout) {
1286 		hdev->adv_instance_timeout = 0;
1287 		cancel_delayed_work(&hdev->adv_instance_expire);
1288 	}
1289 }
1290 
/* This function requires the caller holds hdev->lock */
/* Re-arm the LE auto-connect machinery after power on: put every known
 * connection parameter entry back on the pending list matching its
 * auto_connect policy.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* Disabled entries stay off both pending lists */
			break;
		}
	}
}
1315 
new_settings(struct hci_dev * hdev,struct sock * skip)1316 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1317 {
1318 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1319 
1320 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1321 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1322 }
1323 
/* Completion callback for set_powered_sync: respond to the pending
 * MGMT_OP_SET_POWERED command and, on successful power on, restart LE
 * actions and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1359 
set_powered_sync(struct hci_dev * hdev,void * data)1360 static int set_powered_sync(struct hci_dev *hdev, void *data)
1361 {
1362 	struct mgmt_pending_cmd *cmd = data;
1363 	struct mgmt_mode *cp = cmd->param;
1364 
1365 	BT_DBG("%s", hdev->name);
1366 
1367 	return hci_set_powered_sync(hdev, cp->val);
1368 }
1369 
/* MGMT_OP_SET_POWERED handler: validate the request, reject duplicates
 * and no-ops, then queue the power change as sync work with
 * mgmt_set_powered_complete() as its completion callback.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1412 
/* Public wrapper: broadcast the current settings to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1417 
/* Shared context for mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (ref held) */
	struct hci_dev *hdev;	/* device the commands belong to */
	u8 mgmt_status;		/* status to report, where used */
};
1423 
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings and free it. The first command's socket is
 * stashed (with a held reference) in the cmd_lookup so the caller can
 * skip it when broadcasting new_settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Keep the socket alive past mgmt_pending_free() below */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1439 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1447 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1448 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1449 {
1450 	if (cmd->cmd_complete) {
1451 		u8 *status = data;
1452 
1453 		cmd->cmd_complete(cmd, *status);
1454 		mgmt_pending_remove(cmd);
1455 
1456 		return;
1457 	}
1458 
1459 	cmd_status_rsp(cmd, data);
1460 }
1461 
/* Default cmd_complete handler: echo the command's full parameters
 * back in the response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1467 
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: echo only that address portion back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1473 
mgmt_bredr_support(struct hci_dev * hdev)1474 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1475 {
1476 	if (!lmp_bredr_capable(hdev))
1477 		return MGMT_STATUS_NOT_SUPPORTED;
1478 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1479 		return MGMT_STATUS_REJECTED;
1480 	else
1481 		return MGMT_STATUS_SUCCESS;
1482 }
1483 
mgmt_le_support(struct hci_dev * hdev)1484 static u8 mgmt_le_support(struct hci_dev *hdev)
1485 {
1486 	if (!lmp_le_capable(hdev))
1487 		return MGMT_STATUS_NOT_SUPPORTED;
1488 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1489 		return MGMT_STATUS_REJECTED;
1490 	else
1491 		return MGMT_STATUS_SUCCESS;
1492 }
1493 
/* Completion callback for set_discoverable_sync: respond to the
 * pending MGMT_OP_SET_DISCOVERABLE command, arm the discoverable
 * timeout on success, and broadcast the new settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag that set_discoverable() set eagerly */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arming of the discoverable timeout is deferred to here so it
	 * only starts once the controller actually became discoverable.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1527 
/* Sync work for MGMT_OP_SET_DISCOVERABLE: push the already-updated
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1534 
/* MGMT_OP_SET_DISCOVERABLE handler: validate the mode/timeout combo,
 * handle the powered-off and timeout-only cases without touching the
 * controller, and otherwise update the discoverable flags and queue
 * the sync work. The discoverable timeout itself is armed in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	/* 0x00 off, 0x01 general discoverable, 0x02 limited discoverable */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while the device is off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1667 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1668 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1669 					  int err)
1670 {
1671 	struct mgmt_pending_cmd *cmd = data;
1672 
1673 	bt_dev_dbg(hdev, "err %d", err);
1674 
1675 	/* Make sure cmd still outstanding. */
1676 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1677 		return;
1678 
1679 	hci_dev_lock(hdev);
1680 
1681 	if (err) {
1682 		u8 mgmt_err = mgmt_status(err);
1683 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1684 		goto done;
1685 	}
1686 
1687 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1688 	new_settings(hdev, cmd->sk);
1689 
1690 done:
1691 	if (cmd)
1692 		mgmt_pending_remove(cmd);
1693 
1694 	hci_dev_unlock(hdev);
1695 }
1696 
/* Apply a connectable change purely at the flag level (used while the
 * device is powered off): adjust HCI_CONNECTABLE, drop discoverable
 * when disabling, confirm to @sk, and broadcast if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1725 
/* Sync work for MGMT_OP_SET_CONNECTABLE: push the already-updated
 * connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1732 
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, take the
 * flag-only shortcut while powered off, otherwise update the flags and
 * queue the sync work with mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: no HCI traffic needed, just update the flags */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also tears down discoverable
		 * mode and its pending timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1792 
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE. Purely a flag
 * change; no HCI command is needed, but a discoverable update is
 * triggered because the flag can influence the advertising address in
 * limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-modify so 'changed' reflects an actual transition */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1830 
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security. While powered off only the flag is toggled; while powered
 * the HCI Write Authentication Enable command is sent and the pending
 * command completes from its event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: record the desired state in the flag only */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just confirm */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1899 
/* Completion callback for set_ssp_sync: settle the HCI_SSP_ENABLED
 * (and dependent HCI_HS_ENABLED) flags based on the outcome, answer
 * all pending SET_SSP commands, broadcast changed settings and refresh
 * the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back a failed enable: clear SSP (and HS, which
		 * depends on it) and announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables HS; 'changed' must end up
		 * true if either flag actually changed.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1948 
/* Sync work for MGMT_OP_SET_SSP: write the SSP mode to the controller.
 *
 * NOTE(review): when enabling, HCI_SSP_ENABLED is set before the write
 * and cleared again on success — presumably so set_ssp_complete() can
 * re-detect the transition and emit new_settings; confirm against the
 * complete handler before changing this ordering.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;

	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1966 
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * While powered off only the flags are adjusted (disabling SSP also
 * drops HS); while powered the change is queued as sync work completed
 * by set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature and requires controller support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: record the desired state in the flags only */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* Disabling SSP also disables HS; 'changed' must
			 * reflect whether either flag actually changed.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2046 
/* MGMT_OP_SET_HS handler: enable or disable High Speed support.
 *
 * Only available when built with CONFIG_BT_HS; requires BR/EDR,
 * SSP-capable hardware and SSP currently enabled.  The setting is a
 * pure host-side flag — no HCI command is issued here.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with an in-flight Set SSP, which may clear HS */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2107 
set_le_complete(struct hci_dev * hdev,void * data,int err)2108 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2109 {
2110 	struct cmd_lookup match = { NULL, hdev };
2111 	u8 status = mgmt_status(err);
2112 
2113 	bt_dev_dbg(hdev, "err %d", err);
2114 
2115 	if (status) {
2116 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2117 							&status);
2118 		return;
2119 	}
2120 
2121 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2122 
2123 	new_settings(hdev, match.sk);
2124 
2125 	if (match.sk)
2126 		sock_put(match.sk);
2127 }
2128 
/* Runs on the cmd_sync queue: program LE host support.
 *
 * When disabling, advertising instances are cleared and advertising is
 * stopped before the Write LE Host Supported command is issued.  When
 * enabling, the default advertising/scan-response data is refreshed
 * afterwards.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Tear down all advertising before switching LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Only set scan response data if the extended
			 * advertising instance was set up successfully
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2172 
/* Completion handler for MGMT_OP_SET_MESH_RECEIVER.
 *
 * On error every pending instance of the command is failed with the
 * mapped mgmt status; on success this command is removed and completed
 * with an empty reply.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2188 
/* Runs on the cmd_sync queue: apply the Set Mesh Receiver parameters.
 *
 * Toggles HCI_MESH, copies the caller-supplied AD type filter list into
 * hdev and refreshes passive scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes past the fixed header are the AD type filters.
	 * NOTE(review): assumes param_len >= sizeof(*cp) — presumably
	 * guaranteed by the mgmt command size check; otherwise this
	 * subtraction would wrap (the memcpy below is then skipped, but
	 * confirm the invariant).
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2211 
/* MGMT_OP_SET_MESH_RECEIVER handler: configure mesh advertisement
 * reception.  Requires LE hardware and the mesh experimental flag; the
 * actual work happens in set_mesh_sync() on the cmd_sync queue.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* enable parameter is strictly boolean */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2249 
/* Called once advertising for a mesh transmission has been queued.
 *
 * On failure the sending flag is dropped and the transmission is
 * completed with an error; on success the mesh_send_done work is armed
 * after 25 ms per requested advertising event (send->cnt).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2270 
/* Runs on the cmd_sync queue: advertise a queued mesh packet.
 *
 * Allocates a dedicated advertising instance (one past the controller's
 * advertising sets) carrying the caller's AD payload, then schedules it
 * unless current advertising handling already covers it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* No free advertising set for the mesh packet.
	 * NOTE(review): this exit returns a positive mgmt status while
	 * the other paths return 0/negative errno — confirm callers
	 * handle both conventions.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2324 
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2325 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2326 {
2327 	struct mgmt_rp_mesh_read_features *rp = data;
2328 
2329 	if (rp->used_handles >= rp->max_handles)
2330 		return;
2331 
2332 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2333 }
2334 
/* MGMT_OP_MESH_READ_FEATURES handler: report mesh capability and the
 * requester's outstanding transmission handles.
 *
 * The reply is trimmed to the used handle slots — the length arithmetic
 * assumes one byte per slot in rp.handles.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only available while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect the handles of this socket's pending transmissions */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2361 
/* Runs on the cmd_sync queue: cancel mesh transmissions.
 *
 * A zero handle cancels every transmission owned by the requesting
 * socket; otherwise only the matching handle is cancelled, and only if
 * it is owned by the requester.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owner may cancel its own transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2388 
/* MGMT_OP_MESH_SEND_CANCEL handler: queue cancellation of one or all
 * of the requester's mesh transmissions via send_cancel().
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (cmd)
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
	else
		err = -ENOMEM;

	if (err < 0) {
		/* Undo the pending entry and report a generic failure */
		if (cmd)
			mgmt_pending_free(cmd);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);
	}

	hci_dev_unlock(hdev);

	return err;
}
2422 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for advertising.
 *
 * Rejects when LE is off or the AD payload is not 1..31 bytes, and
 * replies BUSY when all handle slots are in use.  The first packet
 * kicks off mesh_send_sync(); while HCI_MESH_SENDING is set further
 * packets are only queued on the mesh TX list.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count the requester's outstanding handles to detect exhaustion */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		/* Nothing in flight yet: start transmitting right away */
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			/* Only drop the entry if it was freshly queued on
			 * top of an ongoing transmission
			 */
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2483 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * While powered off, or when the host LE mode already matches, only the
 * flags are updated; otherwise the change is queued to the cmd_sync
 * machinery (set_le_sync/set_le_complete).
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Mode parameter is strictly boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		/* No HCI traffic needed: adjust flags only */
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also turns off advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other LE/advertising changing operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2572 
2573 /* This is a helper function to test for pending mgmt commands that can
2574  * cause CoD or EIR HCI commands. We can only allow one such pending
2575  * mgmt command at a time since otherwise we cannot easily track what
2576  * the current values are, will be, and based on that calculate if a new
2577  * HCI command needs to be sent and if yes with what value.
2578  */
pending_eir_or_class(struct hci_dev * hdev)2579 static bool pending_eir_or_class(struct hci_dev *hdev)
2580 {
2581 	struct mgmt_pending_cmd *cmd;
2582 
2583 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2584 		switch (cmd->opcode) {
2585 		case MGMT_OP_ADD_UUID:
2586 		case MGMT_OP_REMOVE_UUID:
2587 		case MGMT_OP_SET_DEV_CLASS:
2588 		case MGMT_OP_SET_POWERED:
2589 			return true;
2590 		}
2591 	}
2592 
2593 	return false;
2594 }
2595 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs are shortened forms of it.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2600 
get_uuid_size(const u8 * uuid)2601 static u8 get_uuid_size(const u8 *uuid)
2602 {
2603 	u32 val;
2604 
2605 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2606 		return 128;
2607 
2608 	val = get_unaligned_le32(&uuid[12]);
2609 	if (val > 0xffff)
2610 		return 32;
2611 
2612 	return 16;
2613 }
2614 
/* Common completion for UUID/device-class changing commands: reply to
 * the originator with the current 3-byte Class of Device and free the
 * pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2626 
/* Runs on the cmd_sync queue: refresh Class of Device and EIR data
 * after a UUID has been added.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2637 
/* MGMT_OP_ADD_UUID handler: record a service UUID on the adapter and
 * refresh Class of Device and EIR data via add_uuid_sync().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other EIR/class changing operations */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* mgmt_class_complete() sends the reply once class/EIR updated */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2683 
enable_service_cache(struct hci_dev * hdev)2684 static bool enable_service_cache(struct hci_dev *hdev)
2685 {
2686 	if (!hdev_is_powered(hdev))
2687 		return false;
2688 
2689 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2690 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2691 				   CACHE_TIMEOUT);
2692 		return true;
2693 	}
2694 
2695 	return false;
2696 }
2697 
/* Runs on the cmd_sync queue: refresh Class of Device and EIR data
 * after a UUID has been removed.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2708 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all, when
 * the all-zero UUID is given) and refresh Class of Device and EIR data
 * via remove_uuid_sync().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other EIR/class changing operations */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: wipe the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the class/EIR
		 * update is deferred to it; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2776 
set_class_sync(struct hci_dev * hdev,void * data)2777 static int set_class_sync(struct hci_dev *hdev, void *data)
2778 {
2779 	int err = 0;
2780 
2781 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2782 		cancel_delayed_work_sync(&hdev->service_cache);
2783 		err = hci_update_eir_sync(hdev);
2784 	}
2785 
2786 	if (err)
2787 		return err;
2788 
2789 	return hci_update_class_sync(hdev);
2790 }
2791 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device.
 *
 * While powered off only the cached values are updated; while powered
 * the class (and any pending EIR update) is written via set_class_sync().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other EIR/class changing operations */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits: low two bits of minor, high three bits of major
	 * must be zero
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: nothing to write to the controller yet */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2843 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list and set the debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all key types before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys rejected by the blocked-keys list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2933 
/* Emit a Device Unpaired event for the given address, skipping the
 * socket that requested the unpairing.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	ev.addr.type = addr_type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2945 
/* Completion handler for MGMT_OP_UNPAIR_DEVICE: on success broadcast
 * Device Unpaired (skipping the requester), then finish the command via
 * its cmd_complete callback and free it.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2957 
unpair_device_sync(struct hci_dev * hdev,void * data)2958 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2959 {
2960 	struct mgmt_pending_cmd *cmd = data;
2961 	struct mgmt_cp_unpair_device *cp = cmd->param;
2962 	struct hci_conn *conn;
2963 
2964 	if (cp->addr.type == BDADDR_BREDR)
2965 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2966 					       &cp->addr.bdaddr);
2967 	else
2968 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2969 					       le_addr_type(cp->addr.type));
2970 
2971 	if (!conn)
2972 		return 0;
2973 
2974 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2975 }
2976 
/* Handle MGMT_OP_UNPAIR_DEVICE: remove the stored keys for a device
 * and, if requested via cp->disconnect, terminate any existing link.
 * For BR/EDR only the link key is removed; for LE the SMP state (LTK,
 * IRK) is cancelled/removed and the connection parameters are either
 * deleted immediately or flagged for removal at disconnect time.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the address that was unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag; any other value is a bad request */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* A missing link key means the device was never paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* With no connection present the parameters can go right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Termination requested: the reply is deferred until
	 * unpair_device_sync()/unpair_device_complete() have run.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3105 
/* Handle MGMT_OP_DISCONNECT: terminate an existing connection to the
 * given address. The reply is deferred via a pending command that is
 * completed once the disconnect actually happens.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int ret;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Reject malformed address types before taking the lock */
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type != BDADDR_BREDR)
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));
	else
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

	/* Links that never got established or are already torn down
	 * cannot be disconnected.
	 */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		ret = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	ret = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (ret < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return ret;
}
3171 
/* Map an HCI link type / address type pair onto the mgmt BDADDR_*
 * address space used in replies and events.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link is reported as BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
3190 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all mgmt
 * visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	u16 count = 0;
	int ret;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		ret = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound for the reply allocation */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses, skipping SCO/eSCO links */
	count = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		bacpy(&rp->addr[count].bdaddr, &c->dst);
		rp->addr[count].type = link_to_bdaddr(c->type, c->dst_type);
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return ret;
}
3244 
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3245 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3246 				   struct mgmt_cp_pin_code_neg_reply *cp)
3247 {
3248 	struct mgmt_pending_cmd *cmd;
3249 	int err;
3250 
3251 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3252 			       sizeof(*cp));
3253 	if (!cmd)
3254 		return -ENOMEM;
3255 
3256 	cmd->cmd_complete = addr_cmd_complete;
3257 
3258 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3259 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3260 	if (err < 0)
3261 		mgmt_pending_remove(cmd);
3262 
3263 	return err;
3264 }
3265 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* BT_SECURITY_HIGH requires a full 16 byte PIN. A shorter PIN is
	 * rejected by sending a negative reply to the controller and an
	 * INVALID_PARAMS status to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Only report INVALID_PARAMS if the negative reply itself
		 * did not fail; otherwise propagate that error.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Build the HCI PIN code reply from the mgmt request */
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3327 
/* Handle MGMT_OP_SET_IO_CAPABILITY: record the IO capability that will
 * be used for subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Values beyond KeyboardDisplay are undefined */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3350 
find_pairing(struct hci_conn * conn)3351 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3352 {
3353 	struct hci_dev *hdev = conn->hdev;
3354 	struct mgmt_pending_cmd *cmd;
3355 
3356 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3357 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3358 			continue;
3359 
3360 		if (cmd->user_data != conn)
3361 			continue;
3362 
3363 		return cmd;
3364 	}
3365 
3366 	return NULL;
3367 }
3368 
/* Complete a pending MGMT_OP_PAIR_DEVICE command: reply to the caller,
 * detach the mgmt callbacks from the connection and release the
 * references taken when pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the hold taken when the pairing connection was set up */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done in pair_device() */
	hci_conn_put(conn);

	return err;
}
3397 
/* Called by SMP when pairing finishes; completes any pending
 * MGMT_OP_PAIR_DEVICE command that tracks this connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3409 
/* BR/EDR connection callback: complete the pending pairing command
 * with the HCI status translated into a mgmt status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3425 
/* LE connection callback: a success here only means the link came up,
 * not that pairing finished (SMP reports that separately), so only
 * failures complete the pending command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3444 
/* Handle MGMT_OP_PAIR_DEVICE: connect to the given address and start
 * dedicated bonding on top of the link. The command stays pending and
 * is completed through pairing_complete() via the connection callbacks.
 *
 * Fix: hci_conn_params_add() can return NULL on allocation failure; the
 * original code dereferenced the result unconditionally (p->auto_connect),
 * which is a NULL pointer dereference under memory pressure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the address being paired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation failed; bail out before dereferencing */
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into a mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A callback already installed means another pairing is using
	 * this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3575 
/* hci_cmd_sync work item that terminates a connection. The connection
 * handle is smuggled in @data via ERR_PTR() by the caller (see
 * cancel_pair_device()), so PTR_ERR() recovers it here.
 */
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	/* The connection may already be gone by the time this runs */
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
3587 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: fail the outstanding
 * MGMT_OP_PAIR_DEVICE command, remove any keys created so far and tear
 * down the link if it only exists because of the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pairing in progress to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Fail the pending command; for PAIR_DEVICE this runs
	 * pairing_complete(), which drops the references it held.
	 * NOTE(review): conn is still read below (conn_reason, handle)
	 * after those references are released — confirm the conn hash
	 * still holds a reference at this point.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
				   NULL);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3645 
/* Common handler for all user pairing responses (PIN negative reply,
 * user confirm, passkey and their negative variants). For LE addresses
 * the response is handed to SMP and completed immediately; for BR/EDR
 * the HCI command @hci_op is sent and a pending mgmt command of type
 * @mgmt_op tracks completion. @passkey is only used by
 * HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* A response requires an existing connection to the device */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE responses go to SMP and are answered synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3716 
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject an incoming PIN request */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3728 
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a pairing confirmation */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The command carries no optional fields, so the length is fixed */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3744 
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a pairing confirmation */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3756 
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the passkey for pairing */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3768 
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: refuse a passkey request */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3780 
/* Expire the current advertising instance if it carries data matching
 * @flags, then move on to the next instance (if any).
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* Nothing to do when there is no current instance or it does not
	 * carry any of the affected data.
	 */
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3803 
name_changed_sync(struct hci_dev * hdev,void * data)3804 static int name_changed_sync(struct hci_dev *hdev, void *data)
3805 {
3806 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3807 }
3808 
/* Completion callback for set_name_sync(): report the outcome of
 * MGMT_OP_SET_LOCAL_NAME back to the requesting socket.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Advertising instances carrying the local name need to
		 * be expired after a successful change.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3833 
set_name_sync(struct hci_dev * hdev,void * data)3834 static int set_name_sync(struct hci_dev *hdev, void *data)
3835 {
3836 	if (lmp_bredr_capable(hdev)) {
3837 		hci_update_name_sync(hdev);
3838 		hci_update_eir_sync(hdev);
3839 	}
3840 
3841 	/* The name is stored in the scan response data and so
3842 	 * no need to update the advertising data here.
3843 	 */
3844 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3845 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3846 
3847 	return 0;
3848 }
3849 
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name. When powered, the controller update runs asynchronously via
 * set_name_sync()/set_name_complete(); when unpowered only the stored
 * values change and the name-changed event is emitted directly.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right away */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name only once the work was queued successfully */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3912 
appearance_changed_sync(struct hci_dev * hdev,void * data)3913 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3914 {
3915 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3916 }
3917 
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and
 * refresh anything that advertises it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int ret;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only value */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		/* Expire advertising instances carrying the appearance */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return ret;
}
3952 
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, configurable
 * and currently selected PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3973 
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3974 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3975 {
3976 	struct mgmt_ev_phy_configuration_changed ev;
3977 
3978 	memset(&ev, 0, sizeof(ev));
3979 
3980 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3981 
3982 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3983 			  sizeof(ev), skip);
3984 }
3985 
/* Completion callback for set_default_phy_sync(): derive the final
 * status from both the sync error and the controller's command
 * complete event, then report it to the requesting socket.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Even when the sync call succeeded, the skb may carry a
	 * controller-level error (or be missing/IS_ERR entirely).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify other sockets about the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4022 
set_default_phy_sync(struct hci_dev * hdev,void * data)4023 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4024 {
4025 	struct mgmt_pending_cmd *cmd = data;
4026 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4027 	struct hci_cp_le_set_default_phy cp_phy;
4028 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4029 
4030 	memset(&cp_phy, 0, sizeof(cp_phy));
4031 
4032 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4033 		cp_phy.all_phys |= 0x01;
4034 
4035 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4036 		cp_phy.all_phys |= 0x02;
4037 
4038 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4039 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4040 
4041 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4042 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4043 
4044 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4045 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4046 
4047 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4048 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4049 
4050 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4051 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4052 
4053 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4054 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4055 
4056 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4057 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4058 
4059 	return 0;
4060 }
4061 
/* MGMT_OP_SET_PHY_CONFIGURATION handler.  Validates the requested PHY
 * selection, applies the BR/EDR portion immediately through hdev->pkt_type
 * and, if the LE portion changed, queues an HCI LE Set Default PHY command
 * which completes via set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable are always active,
	 * so the request must keep all of them selected.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one SET_PHY_CONFIGURATION may be in flight at a time */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR selection into ACL packet types.  The basic
	 * rate multi-slot bits enable packets directly, while the EDR bits
	 * use inverted logic: setting HCI_2DHx/HCI_3DHx in pkt_type
	 * *disables* the corresponding EDR packets.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed: report
	 * completion (and an event if anything changed) right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4190 
/* MGMT_OP_SET_BLOCKED_KEYS handler.  Replaces the device's list of
 * blocked keys with the list supplied by userspace.
 *
 * Note: unlike most handlers here, 'err' carries a MGMT_STATUS_* code
 * (not a negative errno) and is passed as the status of the response.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match key_count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously configured one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure the keys added so far remain in
		 * effect and the command fails with NO_RESOURCES.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4239 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.  Toggles the wideband speech
 * setting; changing the value while the controller is powered is
 * rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only available when the driver declares support via quirk */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered, only a no-op request (same value) is accepted */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	changed = (!!cp->val !=
		   hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED));

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
	else
		hci_dev_clear_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Notify other mgmt sockets only when the value actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4288 
/* MGMT_OP_READ_CONTROLLER_CAP handler.  Reports controller capabilities
 * as a list of EIR-style (length, type, value) entries built into a
 * small stack buffer.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes is enough for the response header plus all entries
	 * appended below (flags, two key-size entries, tx power range).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4355 
/* Experimental feature UUIDs.  Each array stores the UUID in the byte
 * order used on the mgmt wire, i.e. byte-reversed relative to the
 * canonical string form given in the comment above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4399 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler.  Builds the list of
 * experimental features applicable to @hdev (or to the non-controller
 * index when @hdev is NULL).  Each entry is a 128-bit UUID plus a
 * 32-bit flags word where BIT(0) means "currently enabled".
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Kernel debug feature is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals that the feature can be changed */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is available via either an AOSP extension or a
	 * driver-provided set_quality_report callback.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Size the response from the entry type so it stays consistent
	 * with the allocation above instead of using a magic constant.
	 */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) +
				   (sizeof(rp->features[0]) * idx));

	kfree(rp);
	return status;
}
4504 
/* Emit an EXP_FEATURE_CHANGED event for the LL privacy (RPA resolution)
 * experimental feature and keep hdev->conn_flags in sync with it.
 * BIT(1) in the reported flags marks the feature as changeable.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, sizeof(ev.uuid));
	ev.flags = cpu_to_le32(flags);

	/* NOTE(review): conn_flags is updated without any explicit locking
	 * here — confirm whether this needs to be atomic with respect to
	 * other conn_flags users.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4525 
/* Broadcast an EXP_FEATURE_CHANGED event for @uuid to all sockets that
 * opted into experimental-feature events, except @skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, sizeof(ev.uuid));
	ev.flags = enabled ? cpu_to_le32(BIT(0)) : cpu_to_le32(0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
4539 
/* Helper for building entries of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4545 
4546 /* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The response always reports the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Kernel debug is only toggled via the non-controller index */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be cleared while the controller is down */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4582 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* SET_EXP_FEATURE handler for the kernel debug UUID.  Toggles bt_dbg
 * output; only valid on the non-controller index with a single boolean
 * parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is guaranteed NULL here (checked above) */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4629 
/* SET_EXP_FEATURE handler for the mgmt mesh UUID.  Enables or disables
 * the experimental mesh support flag on a controller; disabling it also
 * turns off mesh mode itself.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* This feature is bound to a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet, holding 0x00 or 0x01.  The length
	 * check short-circuits before param[0] is read.
	 */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1 ||
	    (cp->param[0] != 0x00 && cp->param[0] != 0x01))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Disabling the experimental feature also disables mesh */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, enable, sk);

	return err;
}
4680 
/* SET_EXP_FEATURE handler for the LL privacy (RPA resolution) UUID.
 * Only allowed while the controller is powered down.  Reported flags:
 * BIT(0) = enabled, BIT(1) = supported settings changed.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising conflicts with LL privacy; turn it off */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4745 
/* SET_EXP_FEATURE handler for the quality report UUID.  Toggles quality
 * reporting via the driver callback when available, otherwise via the
 * AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other HCI request activity while the quality
	 * report callbacks run.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver-provided callback over the AOSP one */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag once the callback succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4819 
/* SET_EXP_FEATURE handler for the offload codecs UUID.  Only flips the
 * HCI_OFFLOAD_CODECS_ENABLED flag; the driver must provide a
 * get_data_path_id callback for the feature to be supported.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Computing 'changed' before the support check is harmless: it has
	 * no side effects and is discarded on the NOT_SUPPORTED path.
	 */
	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4877 
/* SET_EXP_FEATURE handler for the LE simultaneous roles UUID.  Toggles
 * whether the controller may act as central and peripheral at the same
 * time (requires controller support for the simultaneous LE states).
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* This feature is bound to a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet ... */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* ... holding a boolean 0x00/0x01 */
	if (cp->param[0] > 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	changed = (enable !=
		   hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (changed) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    enable, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, enable,
				    sk);

	return err;
}
4935 
#ifdef CONFIG_BT_LE
/* SET_EXP_FEATURE handler for the ISO socket UUID.  Registers or
 * unregisters the ISO socket protocol globally (non-controller index).
 * Note: the response flags reflect the requested value even when
 * iso_init()/iso_exit() failed; in that case 'changed' stays false and
 * no EXP_FEATURE_CHANGED event is emitted.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above) */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4986 
/* Dispatch table mapping experimental-feature UUIDs to their
 * SET_EXP_FEATURE handlers; walked in order by set_exp_feature().
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5008 
/* MGMT_OP_SET_EXP_FEATURE handler.  Looks up the requested UUID in the
 * exp_features[] table and dispatches to its handler; unknown UUIDs are
 * rejected with NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (memcmp(cp->uuid, feature->uuid, 16) == 0)
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
5026 
get_params_flags(struct hci_dev * hdev,struct hci_conn_params * params)5027 static u32 get_params_flags(struct hci_dev *hdev,
5028 			    struct hci_conn_params *params)
5029 {
5030 	u32 flags = hdev->conn_flags;
5031 
5032 	/* Devices using RPAs can only be programmed in the acceptlist if
5033 	 * LL Privacy has been enable otherwise they cannot mark
5034 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5035 	 */
5036 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5037 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5038 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5039 
5040 	return flags;
5041 }
5042 
/* Get Device Flags command handler: look the device up in the BR/EDR
 * accept list or the LE connection parameters and report its supported
 * and currently set connection flags.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	if (cp->addr.type != BDADDR_BREDR) {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE devices may support fewer flags than the controller */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	} else {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5094 
/* Notify mgmt listeners (except @sk) that a device's flags changed */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5108 
/* Set Device Flags command handler: validate that only supported
 * connection flags were requested, then store them on the matching
 * accept-list entry (BR/EDR) or connection parameters (LE). On success
 * the other mgmt sockets are notified via Device Flags Changed.
 *
 * Fix: hdev->conn_flags was previously sampled before hci_dev_lock()
 * was taken (the source even carried a review note about it); take the
 * lock first so the supported set cannot change during validation.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported set */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Per-params support can be narrower than hdev->conn_flags (see
	 * get_params_flags()), so re-validate against it.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5185 
/* Announce a newly added advertisement monitor to mgmt listeners,
 * skipping the socket @sk that issued the add command.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5195 
/* Emit an Advertisement Monitor Removed event for @handle.
 *
 * If a Remove Advertisement Monitor command targeting a specific handle
 * (non-zero) is pending, its issuer receives a command reply instead,
 * so that socket is skipped when broadcasting the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Zero handle is "remove all" (see
		 * mgmt_remove_adv_monitor_sync()); only skip the issuer
		 * when a single monitor was targeted.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5215 
/* Read Advertisement Monitor Features command handler: report the
 * supported/enabled monitor features, handle and pattern limits, and
 * the handles of all currently registered monitors.
 *
 * Fix: bound the idr walk so it can never write past the on-stack
 * handles[] array, which holds at most HCI_MAX_ADV_MONITOR_NUM_HANDLES
 * entries.
 *
 * Returns 0 on success or -ENOMEM if the reply cannot be allocated.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		/* Never overrun the fixed-size on-stack array */
		if (num_handles >= HCI_MAX_ADV_MONITOR_NUM_HANDLES)
			break;
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	/* Reply is variable length: header plus one u16 per handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5264 
/* hci_cmd_sync completion for the Add Adv Patterns Monitor commands:
 * on success account the new monitor, announce it to other mgmt sockets
 * and refresh passive scanning; always reply to the issuer and drop the
 * pending command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		/* Only transition out of NOT_REGISTERED; any other state
		 * is left untouched.
		 */
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5292 
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5293 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5294 {
5295 	struct mgmt_pending_cmd *cmd = data;
5296 	struct adv_monitor *monitor = cmd->user_data;
5297 
5298 	return hci_add_adv_monitor(hdev, monitor);
5299 }
5300 
/* Common tail for the Add Adv Patterns Monitor command variants.
 *
 * Takes ownership of @m: on any failure — including a non-zero @status
 * handed in by the caller's parsing — the monitor is freed here and a
 * command status is returned. On success the work is queued and the
 * reply comes from mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed while building the monitor */
	if (status)
		goto unlock;

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		/* NOTE(review): cmd is not removed from the pending list
		 * on this path — presumably cleaned up elsewhere; verify.
		 */
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path owns @m and must release it */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5348 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5349 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5350 				   struct mgmt_adv_rssi_thresholds *rssi)
5351 {
5352 	if (rssi) {
5353 		m->rssi.low_threshold = rssi->low_threshold;
5354 		m->rssi.low_threshold_timeout =
5355 		    __le16_to_cpu(rssi->low_threshold_timeout);
5356 		m->rssi.high_threshold = rssi->high_threshold;
5357 		m->rssi.high_threshold_timeout =
5358 		    __le16_to_cpu(rssi->high_threshold_timeout);
5359 		m->rssi.sampling_period = rssi->sampling_period;
5360 	} else {
5361 		/* Default values. These numbers are the least constricting
5362 		 * parameters for MSFT API to work, so it behaves as if there
5363 		 * are no rssi parameter to consider. May need to be changed
5364 		 * if other API are to be supported.
5365 		 */
5366 		m->rssi.low_threshold = -127;
5367 		m->rssi.low_threshold_timeout = 60;
5368 		m->rssi.high_threshold = -127;
5369 		m->rssi.high_threshold_timeout = 0;
5370 		m->rssi.sampling_period = 0;
5371 	}
5372 }
5373 
/* Validate and copy @pattern_count advertising patterns onto the
 * monitor's pattern list.
 *
 * Returns MGMT_STATUS_SUCCESS, MGMT_STATUS_INVALID_PARAMS when a
 * pattern does not fit inside an AD payload, or
 * MGMT_STATUS_NO_RESOURCES on allocation failure.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		/* Pattern must lie entirely within the AD data area */
		if (src->offset >= HCI_MAX_AD_LENGTH ||
		    src->length > HCI_MAX_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5404 
/* Add Adv Patterns Monitor command handler (no RSSI thresholds):
 * validate the variable-length request, build the monitor with default
 * RSSI parameters and hand it to __add_adv_patterns_monitor(), which
 * takes ownership of it.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp))
		goto done;

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size)
		goto done;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the default RSSI parameters */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5441 
/* Add Adv Patterns Monitor RSSI command handler: identical to
 * add_adv_patterns_monitor() except the request carries explicit RSSI
 * thresholds that are applied to the monitor.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp))
		goto done;

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size)
		goto done;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5478 
/* hci_cmd_sync completion for Remove Adv Monitor: echo back the handle
 * from the request, refresh passive scanning on success, reply to the
 * issuer and drop the pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Already little-endian in the request; copied through as-is */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5501 
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5502 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5503 {
5504 	struct mgmt_pending_cmd *cmd = data;
5505 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5506 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5507 
5508 	if (!handle)
5509 		return hci_remove_all_adv_monitor(hdev);
5510 
5511 	return hci_remove_single_adv_monitor(hdev, handle);
5512 }
5513 
/* Remove Advertisement Monitor command handler: reject the request
 * while another monitor or LE state operation is pending, otherwise
 * queue the removal; the reply is sent from
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Queueing failed: discard the pending command and map the
		 * errno to a mgmt status for the synchronous reply below.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5559 
/* hci_cmd_sync completion for Read Local OOB Data.
 *
 * Translates the controller reply — plain or extended, depending on
 * whether BR/EDR Secure Connections is enabled — into the mgmt reply,
 * trimming the P-256 fields from the legacy format.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* If the request itself succeeded, derive the status from the
	 * reply skb: missing, ERR_PTR-encoded, or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash/randomizer exist */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Shrink the reply so the absent P-256 fields are omitted */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply carries both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only a real skb (not NULL or an ERR_PTR) may be freed */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5626 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5627 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5628 {
5629 	struct mgmt_pending_cmd *cmd = data;
5630 
5631 	if (bredr_sc_enabled(hdev))
5632 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5633 	else
5634 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5635 
5636 	if (IS_ERR(cmd->skb))
5637 		return PTR_ERR(cmd->skb);
5638 	else
5639 		return 0;
5640 }
5641 
/* Read Local OOB Data command handler: requires a powered, SSP-capable
 * controller. The HCI exchange runs via hci_cmd_sync and the reply is
 * sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new() (not _add) is used here; the cmd is released
	 * with mgmt_pending_free() in the completion handler.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		/* cmd may be NULL when allocation failed above */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5683 
/* Add Remote OOB Data command handler: store out-of-band pairing data
 * (hash/randomizer) received from a remote device. Two request sizes
 * are accepted — the legacy P-192-only form and the extended form
 * carrying both P-192 and P-256 values; any other length is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 hash/randomizer only, BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5791 
/* Remove Remote OOB Data command handler: delete stored OOB data for a
 * single BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address clears every stored OOB entry */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5828 
/* Complete whichever Start Discovery variant is pending, propagating
 * the HCI status to the mgmt reply.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Any of the three discovery variants may own this completion */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5851 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5852 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5853 				    uint8_t *mgmt_status)
5854 {
5855 	switch (type) {
5856 	case DISCOV_TYPE_LE:
5857 		*mgmt_status = mgmt_le_support(hdev);
5858 		if (*mgmt_status)
5859 			return false;
5860 		break;
5861 	case DISCOV_TYPE_INTERLEAVED:
5862 		*mgmt_status = mgmt_le_support(hdev);
5863 		if (*mgmt_status)
5864 			return false;
5865 		fallthrough;
5866 	case DISCOV_TYPE_BREDR:
5867 		*mgmt_status = mgmt_bredr_support(hdev);
5868 		if (*mgmt_status)
5869 			return false;
5870 		break;
5871 	default:
5872 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5873 		return false;
5874 	}
5875 
5876 	return true;
5877 }
5878 
/* hci_cmd_sync completion for the Start Discovery command variants:
 * reply to the issuer and advance the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail if this cmd is no longer the pending one for any of the
	 * discovery variants.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply echoes the single leading byte of the request parameters */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5897 
/* hci_cmd_sync worker: kick off the actual discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5902 
/* Common implementation for the Start Discovery command variants:
 * validate power, discovery state and type, reset the discovery filter,
 * record the requested parameters and queue the start via hci_cmd_sync.
 * The asynchronous reply comes from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while the
	 * controller is doing periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5973 
/* Start Discovery command handler (unfiltered, unlimited) */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5980 
/* Start Limited Discovery command handler: same flow as Start
 * Discovery, but start_discovery_internal() marks the discovery as
 * limited for this opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5988 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery, but
 * results are filtered by an RSSI threshold and/or a list of 128-bit
 * service UUIDs supplied after the fixed command parameters. The actual
 * discovery work is queued to start_discovery_sync().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on the UUID count such that the total command
	 * length still fits a u16 (each UUID is 16 bytes).
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery procedure may run at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery while it is paused (e.g. for suspend). */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length UUID list must match the advertised count
	 * exactly (uuid_count was bounded above, so this cannot overflow).
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the caller's UUID filter; released again via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6100 
/* Report completion of the Stop Discovery command to a pending mgmt
 * request, if one is still outstanding.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6117 
/* Completion callback for the Stop Discovery work queued via
 * hci_cmd_sync_queue(): answers the mgmt request and, on success,
 * marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if this pending command was already replaced or
	 * cancelled by someone else in the meantime.
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the first parameter byte (the discovery type). */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6134 
/* hci_cmd_sync work: actually stop the ongoing discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6139 
/* MGMT_OP_STOP_DISCOVERY handler: cancel an ongoing discovery of the
 * given type. Completion is reported from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is in progress. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that was started. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6184 
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether the name of
 * a discovered device is already known, so the inquiry cache can decide
 * whether a remote name request is still needed.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Confirming a name only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	if (!cp->name_known) {
		/* Name still unknown: keep the entry queued for resolving. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	} else {
		/* Name known: no resolution required any more. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6226 
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the reject list and
 * broadcast a Device Blocked event to the other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		status = MGMT_STATUS_FAILED;
	} else {
		/* Only broadcast the event when the list was updated. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6262 
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the reject
 * list and broadcast a Device Unblocked event to the other sockets.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0) {
		/* Deleting fails when the entry was never blocked. */
		status = MGMT_STATUS_INVALID_PARAMS;
	} else {
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6298 
/* hci_cmd_sync work: refresh the EIR data so it carries the new
 * Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6303 
/* MGMT_OP_SET_DEVICE_ID handler: store the DI profile record values and
 * schedule an EIR update so remote devices can see them.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* 0x0000 disables; 0x0001 (Bluetooth SIG) and 0x0002 (USB IF)
	 * are the only defined vendor ID sources.
	 */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6335 
/* Log the outcome of re-arming instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6343 
/* Completion callback for Set Advertising: synchronize the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands and, if advertising was just disabled while
 * advertising instances still exist, re-arm instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* On failure simply fail every pending command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual advertising state in the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance selected: fall back to the first one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6391 
/* hci_cmd_sync work for Set Advertising: toggle the connectable flag
 * and enable or disable advertising on instance 0.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* A value of 0x02 requests connectable advertising. */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6425 
/* MGMT_OP_SET_ADVERTISING handler: disable (0x00), enable (0x01) or
 * enable connectable (0x02) LE advertising. When no HCI traffic is
 * required the flags are toggled directly; otherwise the work is queued
 * to set_adv_sync() and finished in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Refuse changes while advertising is paused (e.g. suspend). */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when something changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other pending advertising/LE mode changes. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6510 
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zeroes) clears the static address again, so the
	 * format checks only apply to a non-zero address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* All-ones (BDADDR_NONE) is not a valid static address. */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6554 
/* MGMT_OP_SET_SCAN_PARAMS handler: update the LE scan interval and
 * window, then restart passive scanning so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie in 0x0004-0x4000 and the scan window may
	 * not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 || window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6603 
/* Completion callback for Set Fast Connectable: sync the flag with the
 * controller result and notify the requester and other sockets.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* Allocated with mgmt_pending_new(), so free rather than remove. */
	mgmt_pending_free(cmd);
}
6627 
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6628 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6629 {
6630 	struct mgmt_pending_cmd *cmd = data;
6631 	struct mgmt_mode *cp = cmd->param;
6632 
6633 	return hci_write_fast_connectable_sync(hdev, cp->val);
6634 }
6635 
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle BR/EDR fast connectable
 * (shortened page scan interval) mode.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and the page scan settings that exist from
	 * Bluetooth 1.2 on.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge with current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs to change; the controller
	 * is configured when powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6691 
/* Completion callback for Set BR/EDR: roll the flag back on failure,
 * otherwise report the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* Allocated with mgmt_pending_new(), so free rather than remove. */
	mgmt_pending_free(cmd);
}
6714 
set_bredr_sync(struct hci_dev * hdev,void * data)6715 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6716 {
6717 	int status;
6718 
6719 	status = hci_write_fast_connectable_sync(hdev, false);
6720 
6721 	if (!status)
6722 		status = hci_update_scan_sync(hdev);
6723 
6724 	/* Since only the advertising data flags will change, there
6725 	 * is no need to update the scan response data.
6726 	 */
6727 	if (!status)
6728 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6729 
6730 	return status;
6731 }
6732 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling while powered on is rejected, and so
 * is re-enabling when a static address or secure connections make the
 * resulting configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge with current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags need to change; BR/EDR-only
	 * settings are dropped when BR/EDR gets disabled.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6833 
/* Completion callback for Set Secure Connections: sync the SC flags
 * with the requested mode and report the new settings.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC only mode. */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6871 
/* hci_cmd_sync work for Set Secure Connections: write SC support to
 * the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	/* NOTE(review): setting HCI_SC_ENABLED up front appears intended
	 * to make hci_write_sc_support_sync() actually issue the write;
	 * confirm against that helper before relying on it.
	 */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6883 
/* MGMT_OP_SET_SECURE_CONN handler: disable (0x00), enable (0x01) or
 * enable SC-only mode (0x02). If no HCI traffic is required the flags
 * are toggled directly, otherwise the write is queued to
 * set_secure_conn_sync() and finished in set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or LE (where it is a host
	 * stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR SC-capable controller SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without power, controller SC support or BR/EDR only the flags
	 * need to change; no HCI command is required.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just acknowledge with current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6964 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discards debug keys, 0x01 keeps
 * them, 0x02 additionally makes the controller generate debug keys
 * (SSP debug mode).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* 0x02 additionally turns on use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only touch the controller when powered, SSP is on and the
	 * "use" state actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7011 
/* MGMT Set Privacy command handler.
 *
 * privacy 0x00 disables privacy, 0x01 enables it and 0x02 enables
 * limited privacy.  The command carries the local IRK and is only
 * accepted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the privacy setting while powered is not allowed. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA as expired, for this device and
		 * all advertising instances.
		 */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is turned off. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only if the setting actually changed. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7068 
irk_is_valid(struct mgmt_irk_info * irk)7069 static bool irk_is_valid(struct mgmt_irk_info *irk)
7070 {
7071 	switch (irk->addr.type) {
7072 	case BDADDR_LE_PUBLIC:
7073 		return true;
7074 
7075 	case BDADDR_LE_RANDOM:
7076 		/* Two most significant bits shall be set */
7077 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7078 			return false;
7079 		return true;
7080 	}
7081 
7082 	return false;
7083 }
7084 
/* MGMT Load IRKs command handler.
 *
 * Replaces the complete set of stored Identity Resolving Keys with
 * the list supplied by user space.  The whole list is validated
 * before any existing keys are dropped.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeping the total command length within U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so an invalid entry rejects the
	 * command before the existing keys are cleared.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Keys on the block list are skipped with a warning. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can resolve RPAs. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7160 
ltk_is_valid(struct mgmt_ltk_info * key)7161 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7162 {
7163 	if (key->initiator != 0x00 && key->initiator != 0x01)
7164 		return false;
7165 
7166 	switch (key->addr.type) {
7167 	case BDADDR_LE_PUBLIC:
7168 		return true;
7169 
7170 	case BDADDR_LE_RANDOM:
7171 		/* Two most significant bits shall be set */
7172 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7173 			return false;
7174 		return true;
7175 	}
7176 
7177 	return false;
7178 }
7179 
/* MGMT Load Long Term Keys command handler.
 *
 * Replaces the complete set of stored LTKs with the list supplied by
 * user space.  All entries are validated before the existing keys
 * are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeping the total command length within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before clearing the current key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Keys on the block list are skipped with a warning. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the SMP key type and the
		 * authentication level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Intentional: fall through to the default case,
			 * which skips the entry, so debug keys are never
			 * added to the key store.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7280 
/* hci_cmd_sync completion callback for Get Connection Information.
 *
 * On success the RSSI and TX power values cached in the hci_conn are
 * reported; on failure the respective invalid marker values are
 * returned instead.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the reply. */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7309 
/* hci_cmd_sync callback for Get Connection Information: refresh the
 * RSSI and TX power values cached in the connection by querying the
 * controller.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for the completion callback. */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7347 
/* MGMT Get Connection Information command handler.
 *
 * Returns RSSI and TX power for a connected device.  Cached values
 * are used while they are fresh; otherwise a sync request is queued
 * to re-read them from the controller and the reply is deferred to
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses map to the ACL link, anything else to LE. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7439 
/* hci_cmd_sync completion callback for Get Clock Information.
 *
 * Reports the local clock and, when a connection was resolved, its
 * piconet clock and accuracy.  On error only the address is echoed
 * back with the mapped status code.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure leave the clock fields zeroed. */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7470 
/* hci_cmd_sync callback for Get Clock Information: read the local
 * clock and, if the connection still exists, its piconet clock.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Read the local clock first (zeroed parameters select handle
	 * 0x0000 / local clock); the return value is not checked.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for the completion callback. */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7492 
/* MGMT Get Clock Information command handler.
 *
 * Only BR/EDR addresses are accepted.  BDADDR_ANY requests the local
 * clock only; a specific address additionally requires an existing
 * ACL connection whose piconet clock is read as well.  The reply is
 * deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A concrete address must refer to a connected ACL link;
	 * BDADDR_ANY means only the local clock is wanted.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7556 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7557 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7558 {
7559 	struct hci_conn *conn;
7560 
7561 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7562 	if (!conn)
7563 		return false;
7564 
7565 	if (conn->dst_type != type)
7566 		return false;
7567 
7568 	if (conn->state != BT_CONNECTED)
7569 		return false;
7570 
7571 	return true;
7572 }
7573 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the parameter entry if it does not exist yet. */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged. */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any pending list before re-filing below. */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if the device is not
		 * already connected.
		 */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7618 
/* Send a Device Added mgmt event describing the new accept/auto
 * connect entry.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7630 
/* hci_cmd_sync callback for Add Device: re-evaluate passive scanning
 * so the new entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7635 
/* MGMT Add Device command handler.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connection)
 * is supported and the device is put on the accept list.  For LE
 * addresses connection parameters are created with the auto connect
 * policy derived from action: 0x02 -> always, 0x01 -> direct,
 * otherwise background reporting.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the auto connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Refresh passive scanning so the new entry takes effect. */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7737 
/* Send a Device Removed mgmt event for the given address. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7748 
/* hci_cmd_sync callback for Remove Device: re-evaluate passive
 * scanning after the entry removal.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7753 
/* MGMT Remove Device command handler.
 *
 * With a specific address, the matching accept list entry (BR/EDR)
 * or connection parameters (LE) are removed.  With BDADDR_ANY (and
 * addr.type 0), all accept list entries and all non-disabled LE
 * connection parameters are removed at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR removal does not touch passive scanning. */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through it either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with addr.type 0. */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect,
			 * just downgrade their policy.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Refresh passive scanning to reflect the removals. */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7877 
/* MGMT Load Connection Parameters command handler.
 *
 * Replaces the disabled connection parameter entries with the list
 * supplied by user space.  Entries with an invalid address type or
 * out-of-range parameters are logged and skipped rather than failing
 * the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping the total command length within U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload size. */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE addresses carry connection parameters. */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7962 
/* MGMT Set External Configuration command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.  If this changes whether the
 * controller counts as configured, the mgmt index is re-announced
 * accordingly (moving between the configured and unconfigured index
 * lists).  Only allowed while powered off.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The HCI_UNCONFIGURED flag no longer matching is_configured()
	 * means the index has to switch lists: remove it, flip the
	 * flag, then either power on (now configured) or re-add it as
	 * a raw, unconfigured index.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8018 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores a new public controller address; if that completes the
 * controller's configuration, it is re-registered as a configured index
 * and powered up with auto-off armed.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool addr_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address may only be set while the controller is powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a set_bdaddr callback for this to work. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Record whether the stored address actually changes before
	 * overwriting it; index updates only happen on a real change.
	 */
	addr_changed = bacmp(&hdev->public_addr, &cp->bdaddr) != 0;
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0 || !addr_changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8070 
read_local_oob_ext_data_complete(struct hci_dev * hdev,void * data,int err)8071 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8072 					     int err)
8073 {
8074 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8075 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8076 	u8 *h192, *r192, *h256, *r256;
8077 	struct mgmt_pending_cmd *cmd = data;
8078 	struct sk_buff *skb = cmd->skb;
8079 	u8 status = mgmt_status(err);
8080 	u16 eir_len;
8081 
8082 	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8083 		return;
8084 
8085 	if (!status) {
8086 		if (!skb)
8087 			status = MGMT_STATUS_FAILED;
8088 		else if (IS_ERR(skb))
8089 			status = mgmt_status(PTR_ERR(skb));
8090 		else
8091 			status = mgmt_status(skb->data[0]);
8092 	}
8093 
8094 	bt_dev_dbg(hdev, "status %u", status);
8095 
8096 	mgmt_cp = cmd->param;
8097 
8098 	if (status) {
8099 		status = mgmt_status(status);
8100 		eir_len = 0;
8101 
8102 		h192 = NULL;
8103 		r192 = NULL;
8104 		h256 = NULL;
8105 		r256 = NULL;
8106 	} else if (!bredr_sc_enabled(hdev)) {
8107 		struct hci_rp_read_local_oob_data *rp;
8108 
8109 		if (skb->len != sizeof(*rp)) {
8110 			status = MGMT_STATUS_FAILED;
8111 			eir_len = 0;
8112 		} else {
8113 			status = MGMT_STATUS_SUCCESS;
8114 			rp = (void *)skb->data;
8115 
8116 			eir_len = 5 + 18 + 18;
8117 			h192 = rp->hash;
8118 			r192 = rp->rand;
8119 			h256 = NULL;
8120 			r256 = NULL;
8121 		}
8122 	} else {
8123 		struct hci_rp_read_local_oob_ext_data *rp;
8124 
8125 		if (skb->len != sizeof(*rp)) {
8126 			status = MGMT_STATUS_FAILED;
8127 			eir_len = 0;
8128 		} else {
8129 			status = MGMT_STATUS_SUCCESS;
8130 			rp = (void *)skb->data;
8131 
8132 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8133 				eir_len = 5 + 18 + 18;
8134 				h192 = NULL;
8135 				r192 = NULL;
8136 			} else {
8137 				eir_len = 5 + 18 + 18 + 18 + 18;
8138 				h192 = rp->hash192;
8139 				r192 = rp->rand192;
8140 			}
8141 
8142 			h256 = rp->hash256;
8143 			r256 = rp->rand256;
8144 		}
8145 	}
8146 
8147 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8148 	if (!mgmt_rp)
8149 		goto done;
8150 
8151 	if (eir_len == 0)
8152 		goto send_rsp;
8153 
8154 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8155 				  hdev->dev_class, 3);
8156 
8157 	if (h192 && r192) {
8158 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8159 					  EIR_SSP_HASH_C192, h192, 16);
8160 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8161 					  EIR_SSP_RAND_R192, r192, 16);
8162 	}
8163 
8164 	if (h256 && r256) {
8165 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8166 					  EIR_SSP_HASH_C256, h256, 16);
8167 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168 					  EIR_SSP_RAND_R256, r256, 16);
8169 	}
8170 
8171 send_rsp:
8172 	mgmt_rp->type = mgmt_cp->type;
8173 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
8174 
8175 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
8176 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8177 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8178 	if (err < 0 || status)
8179 		goto done;
8180 
8181 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8182 
8183 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8184 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8185 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8186 done:
8187 	if (skb && !IS_ERR(skb))
8188 		kfree_skb(skb);
8189 
8190 	kfree(mgmt_rp);
8191 	mgmt_pending_remove(cmd);
8192 }
8193 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8194 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8195 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8196 {
8197 	struct mgmt_pending_cmd *cmd;
8198 	int err;
8199 
8200 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8201 			       cp, sizeof(*cp));
8202 	if (!cmd)
8203 		return -ENOMEM;
8204 
8205 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8206 				 read_local_oob_ext_data_complete);
8207 
8208 	if (err < 0) {
8209 		mgmt_pending_remove(cmd);
8210 		return err;
8211 	}
8212 
8213 	return 0;
8214 }
8215 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds out-of-band pairing data in EIR/TLV form for either the BR/EDR
 * transport or the LE transports, depending on cp->type. The BR/EDR SSP
 * case is asynchronous: the reply is sent from
 * read_local_oob_ext_data_complete() once the controller answers; all
 * other cases reply synchronously from here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and the maximum EIR length so
	 * the reply buffer can be sized before any data is generated.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* class-of-device TLV: 2 header + 3 data */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + SC confirm + SC random
				 * + flags TLVs
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually fill in the EIR data. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must come from the controller; on
			 * success the reply is sent asynchronously by the
			 * completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Choose the advertised address: static random (type 0x01)
		 * when forced, when no public address exists, or when
		 * BR/EDR is disabled and a static address is configured;
		 * public (type 0x00) otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* NOTE(review): role is derived solely from the
		 * HCI_ADVERTISING flag here (0x02 when advertising, 0x01
		 * otherwise) — confirm against the LE Role AD definition.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe this socket to future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to other subscribed sockets. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8376 
get_supported_adv_flags(struct hci_dev * hdev)8377 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8378 {
8379 	u32 flags = 0;
8380 
8381 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8382 	flags |= MGMT_ADV_FLAG_DISCOV;
8383 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8384 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8385 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8386 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8387 	flags |= MGMT_ADV_PARAM_DURATION;
8388 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8389 	flags |= MGMT_ADV_PARAM_INTERVALS;
8390 	flags |= MGMT_ADV_PARAM_TX_POWER;
8391 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8392 
8393 	/* In extended adv TX_POWER returned from Set Adv Param
8394 	 * will be always valid.
8395 	 */
8396 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8397 		flags |= MGMT_ADV_FLAG_TX_POWER;
8398 
8399 	if (ext_adv_capable(hdev)) {
8400 		flags |= MGMT_ADV_FLAG_SEC_1M;
8401 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8402 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8403 
8404 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
8405 			flags |= MGMT_ADV_FLAG_SEC_2M;
8406 
8407 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8408 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8409 	}
8410 
8411 	return flags;
8412 }
8413 
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Reports the supported advertising flags, the adv/scan-response data
 * size limits, and the list of currently registered instance numbers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte of instance number per registered instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comment above names le_num_of_adv_sets
		 * as the bound, but the check below compares against
		 * adv_instance_cnt — confirm which limit is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly. */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8468 
calculate_name_len(struct hci_dev * hdev)8469 static u8 calculate_name_len(struct hci_dev *hdev)
8470 {
8471 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8472 
8473 	return eir_append_local_name(hdev, buf, 0);
8474 }
8475 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8476 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8477 			   bool is_adv_data)
8478 {
8479 	u8 max_len = HCI_MAX_AD_LENGTH;
8480 
8481 	if (is_adv_data) {
8482 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8483 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8484 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8485 			max_len -= 3;
8486 
8487 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8488 			max_len -= 3;
8489 	} else {
8490 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8491 			max_len -= calculate_name_len(hdev);
8492 
8493 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8494 			max_len -= 4;
8495 	}
8496 
8497 	return max_len;
8498 }
8499 
flags_managed(u32 adv_flags)8500 static bool flags_managed(u32 adv_flags)
8501 {
8502 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8503 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8504 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8505 }
8506 
tx_power_managed(u32 adv_flags)8507 static bool tx_power_managed(u32 adv_flags)
8508 {
8509 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8510 }
8511 
name_managed(u32 adv_flags)8512 static bool name_managed(u32 adv_flags)
8513 {
8514 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8515 }
8516 
appearance_managed(u32 adv_flags)8517 static bool appearance_managed(u32 adv_flags)
8518 {
8519 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8520 }
8521 
/* Validate user-supplied adv or scan-response TLV data.
 *
 * Checks that the data fits within the space left after kernel-managed
 * fields, that no field duplicates one the kernel manages, and that the
 * TLV structure is well-formed.
 *
 * Fix: the old loop read the field type at data[i + 1] before checking
 * that the declared field length fits inside the buffer, which read one
 * byte past the logical end of the data for a truncated trailing field.
 * The bound check is now done first; the accepted/rejected outcome is
 * unchanged since both paths returned false for such input.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this first also
		 * guarantees data[i + 1] below is within bounds.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8566 
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8567 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8568 {
8569 	u32 supported_flags, phy_flags;
8570 
8571 	/* The current implementation only supports a subset of the specified
8572 	 * flags. Also need to check mutual exclusiveness of sec flags.
8573 	 */
8574 	supported_flags = get_supported_adv_flags(hdev);
8575 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8576 	if (adv_flags & ~supported_flags ||
8577 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8578 		return false;
8579 
8580 	return true;
8581 }
8582 
adv_busy(struct hci_dev * hdev)8583 static bool adv_busy(struct hci_dev *hdev)
8584 {
8585 	return pending_find(MGMT_OP_SET_LE, hdev);
8586 }
8587 
/* Finalize all pending advertising instances after an add operation:
 * on success they simply stop being "pending"; on failure each pending
 * instance is removed and its removal broadcast to userspace.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *tmp;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
		u8 rem_instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_instance = adv->instance;

		/* Stop the rotation timer if it points at this instance. */
		if (hdev->cur_adv_instance == rem_instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_instance);
		mgmt_advertising_removed(sk, hdev, rem_instance);
	}

	hci_dev_unlock(hdev);
}
8619 
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8620 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8621 {
8622 	struct mgmt_pending_cmd *cmd = data;
8623 	struct mgmt_cp_add_advertising *cp = cmd->param;
8624 	struct mgmt_rp_add_advertising rp;
8625 
8626 	memset(&rp, 0, sizeof(rp));
8627 
8628 	rp.instance = cp->instance;
8629 
8630 	if (err)
8631 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8632 				mgmt_status(err));
8633 	else
8634 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8635 				  mgmt_status(err), &rp, sizeof(rp));
8636 
8637 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8638 
8639 	mgmt_pending_free(cmd);
8640 }
8641 
add_advertising_sync(struct hci_dev * hdev,void * data)8642 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8643 {
8644 	struct mgmt_pending_cmd *cmd = data;
8645 	struct mgmt_cp_add_advertising *cp = cmd->param;
8646 
8647 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8648 }
8649 
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Registers (or replaces) advertising instance cp->instance with the
 * supplied adv and scan-response data and, when possible, schedules it
 * for actual advertising. Replies immediately when no HCI traffic is
 * needed; otherwise the reply is sent from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length adv and scan-rsp data must exactly fill the
	 * remainder of the command.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense while the controller is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV payloads against kernel-managed fields. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8784 
add_ext_adv_params_complete(struct hci_dev * hdev,void * data,int err)8785 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8786 					int err)
8787 {
8788 	struct mgmt_pending_cmd *cmd = data;
8789 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8790 	struct mgmt_rp_add_ext_adv_params rp;
8791 	struct adv_info *adv;
8792 	u32 flags;
8793 
8794 	BT_DBG("%s", hdev->name);
8795 
8796 	hci_dev_lock(hdev);
8797 
8798 	adv = hci_find_adv_instance(hdev, cp->instance);
8799 	if (!adv)
8800 		goto unlock;
8801 
8802 	rp.instance = cp->instance;
8803 	rp.tx_power = adv->tx_power;
8804 
8805 	/* While we're at it, inform userspace of the available space for this
8806 	 * advertisement, given the flags that will be used.
8807 	 */
8808 	flags = __le32_to_cpu(cp->flags);
8809 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8810 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8811 
8812 	if (err) {
8813 		/* If this advertisement was previously advertising and we
8814 		 * failed to update it, we signal that it has been removed and
8815 		 * delete its structure
8816 		 */
8817 		if (!adv->pending)
8818 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8819 
8820 		hci_remove_adv_instance(hdev, cp->instance);
8821 
8822 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8823 				mgmt_status(err));
8824 	} else {
8825 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8826 				  mgmt_status(err), &rp, sizeof(rp));
8827 	}
8828 
8829 unlock:
8830 	if (cmd)
8831 		mgmt_pending_free(cmd);
8832 
8833 	hci_dev_unlock(hdev);
8834 }
8835 
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8836 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8837 {
8838 	struct mgmt_pending_cmd *cmd = data;
8839 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8840 
8841 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8842 }
8843 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * First half of the two-call extended advertising interface: creates an
 * advertising instance from the supplied parameters (applying defaults
 * for any parameter flag not set) without any adv/scan-rsp data. On
 * ext-adv capable controllers the parameters are programmed
 * asynchronously and the reply comes from add_ext_adv_params_complete();
 * otherwise a synchronous success reply is sent here.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy controller: no parameters to program yet, reply
		 * immediately with defaults and the available data space.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8959 
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8960 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8961 {
8962 	struct mgmt_pending_cmd *cmd = data;
8963 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8964 	struct mgmt_rp_add_advertising rp;
8965 
8966 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8967 
8968 	memset(&rp, 0, sizeof(rp));
8969 
8970 	rp.instance = cp->instance;
8971 
8972 	if (err)
8973 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8974 				mgmt_status(err));
8975 	else
8976 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8977 				  mgmt_status(err), &rp, sizeof(rp));
8978 
8979 	mgmt_pending_free(cmd);
8980 }
8981 
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8982 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8983 {
8984 	struct mgmt_pending_cmd *cmd = data;
8985 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8986 	int err;
8987 
8988 	if (ext_adv_capable(hdev)) {
8989 		err = hci_update_adv_data_sync(hdev, cp->instance);
8990 		if (err)
8991 			return err;
8992 
8993 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8994 		if (err)
8995 			return err;
8996 
8997 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8998 	}
8999 
9000 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9001 }
9002 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan response
 * data to an advertising instance previously created via
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then schedule it for advertising.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: advertising data occupies the first
	 * adv_data_len bytes of cp->data, the scan response data follows
	 * immediately after it.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	/* Queue the controller update; add_ext_adv_data_complete() sends
	 * the reply and frees cmd once the sync work has finished.
	 */
	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9121 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9122 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9123 					int err)
9124 {
9125 	struct mgmt_pending_cmd *cmd = data;
9126 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9127 	struct mgmt_rp_remove_advertising rp;
9128 
9129 	bt_dev_dbg(hdev, "err %d", err);
9130 
9131 	memset(&rp, 0, sizeof(rp));
9132 	rp.instance = cp->instance;
9133 
9134 	if (err)
9135 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9136 				mgmt_status(err));
9137 	else
9138 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9139 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9140 
9141 	mgmt_pending_free(cmd);
9142 }
9143 
remove_advertising_sync(struct hci_dev * hdev,void * data)9144 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9145 {
9146 	struct mgmt_pending_cmd *cmd = data;
9147 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9148 	int err;
9149 
9150 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9151 	if (err)
9152 		return err;
9153 
9154 	if (list_empty(&hdev->adv_instances))
9155 		err = hci_disable_advertising_sync(hdev);
9156 
9157 	return err;
9158 }
9159 
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance,
 * or all instances when cp->instance is 0 (per the MGMT API).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Instance 0 skips the lookup; any other value must exist. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight SET_LE toggle. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered: there is nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* remove_advertising_complete() sends the reply and frees cmd. */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9207 
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response data lengths available for a given instance/flag combo.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 adv_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Size info only makes sense on LE capable controllers. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	adv_flags = __le32_to_cpu(cp->flags);

	/* Reject any flag outside the subset this implementation supports. */
	if (adv_flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, adv_flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, adv_flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9243 
/* Command dispatch table, indexed by MGMT opcode. Each entry gives the
 * handler, the expected parameter size (a minimum when HCI_MGMT_VAR_LEN
 * is set), and flags describing index/configuration/trust requirements.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9377 
/* Announce a newly registered controller to management listeners.
 * Raw devices are invisible to the management interface.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	/* The extended event additionally carries controller type and bus. */
	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
9409 
/* Announce controller removal to management listeners, failing any
 * still-pending commands with MGMT_STATUS_INVALID_INDEX.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0: iterate all pending commands (mirrors the use
		 * in __mgmt_power_off()).
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9451 
/* Called when a power-on attempt has finished: completes pending
 * SET_POWERED commands and broadcasts the resulting settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp() answers each pending SET_POWERED command and
	 * records a requester socket in match.sk (released below).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9474 
/* Finish a power-off: answer pending commands, announce the cleared
 * class of device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0: fail every remaining pending command with status. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9508 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9509 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9510 {
9511 	struct mgmt_pending_cmd *cmd;
9512 	u8 status;
9513 
9514 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9515 	if (!cmd)
9516 		return;
9517 
9518 	if (err == -ERFKILL)
9519 		status = MGMT_STATUS_RFKILLED;
9520 	else
9521 		status = MGMT_STATUS_FAILED;
9522 
9523 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9524 
9525 	mgmt_pending_remove(cmd);
9526 }
9527 
/* Broadcast MGMT_EV_NEW_LINK_KEY for a newly created BR/EDR link key.
 * @persistent: store hint for userspace (whether to keep the key).
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event (including any padding) before filling. */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9544 
mgmt_ltk_type(struct smp_ltk * ltk)9545 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9546 {
9547 	switch (ltk->type) {
9548 	case SMP_LTK:
9549 	case SMP_LTK_RESPONDER:
9550 		if (ltk->authenticated)
9551 			return MGMT_LTK_AUTHENTICATED;
9552 		return MGMT_LTK_UNAUTHENTICATED;
9553 	case SMP_LTK_P256:
9554 		if (ltk->authenticated)
9555 			return MGMT_LTK_P256_AUTH;
9556 		return MGMT_LTK_P256_UNAUTH;
9557 	case SMP_LTK_P256_DEBUG:
9558 		return MGMT_LTK_P256_DEBUG;
9559 	}
9560 
9561 	return MGMT_LTK_UNAUTHENTICATED;
9562 }
9563 
/* Broadcast MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* NOTE(review): SMP_LTK appears to be the initiator-distributed
	 * key type — confirm against smp.h.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9606 
/* Broadcast MGMT_EV_NEW_IRK for a newly received identity resolving key. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero everything (including padding) before filling the event. */
	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	ev.store_hint = persistent;

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9622 
/* Broadcast MGMT_EV_NEW_CSRK for a newly received signature resolving
 * key. @persistent: store hint for userspace.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9652 
/* Broadcast MGMT_EV_NEW_CONN_PARAM so userspace can decide whether to
 * store connection parameters suggested by a remote device.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only worth storing for identity addresses;
	 * other addresses change and the entry would be stale.
	 */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9673 
/* Broadcast MGMT_EV_DEVICE_CONNECTED for a new connection.
 *
 * For LE connections the advertising data seen during connection
 * establishment is forwarded as the event's EIR payload; for BR/EDR the
 * remote name (if known) and class of device are encoded instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* The event is best-effort: bail out instead of dereferencing a
	 * NULL skb if the allocation failed under memory pressure.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9720 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9721 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9722 {
9723 	struct sock **sk = data;
9724 
9725 	cmd->cmd_complete(cmd, 0);
9726 
9727 	*sk = cmd->sk;
9728 	sock_hold(*sk);
9729 
9730 	mgmt_pending_remove(cmd);
9731 }
9732 
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command by emitting the Device Unpaired event and completing it.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9743 
mgmt_powering_down(struct hci_dev * hdev)9744 bool mgmt_powering_down(struct hci_dev *hdev)
9745 {
9746 	struct mgmt_pending_cmd *cmd;
9747 	struct mgmt_mode *cp;
9748 
9749 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9750 	if (!cmd)
9751 		return false;
9752 
9753 	cp = cmd->param;
9754 	if (!cp->val)
9755 		return true;
9756 
9757 	return false;
9758 }
9759 
/* Broadcast MGMT_EV_DEVICE_DISCONNECTED and finish related pending
 * commands when a connection goes away.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to mgmt. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores a referenced requester socket in sk so
	 * the event can skip the socket that asked for the disconnect.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9799 
/* A disconnect request failed: complete the pending DISCONNECT command
 * (if it targeted this address) with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	/* Pending unpair commands are completed regardless of which
	 * device the failed disconnect was for.
	 */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if address and type both match. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9825 
/* Broadcast MGMT_EV_CONNECT_FAILED when a connection attempt fails. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9845 
/* Broadcast MGMT_EV_PIN_CODE_REQUEST so an agent can supply a PIN. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	/* PIN code pairing only exists on BR/EDR links. */
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9856 
/* Complete a pending PIN_CODE_REPLY command with the HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9869 
/* Complete a pending PIN_CODE_NEG_REPLY command with the HCI status. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9882 
/* Ask userspace to confirm a numeric comparison value during pairing. */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	evt.value = cpu_to_le32(value);
	evt.confirm_hint = confirm_hint;
	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&evt.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &evt,
			  sizeof(evt), NULL);
}
9899 
/* Ask userspace to enter a passkey for the remote device. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&evt.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &evt,
			  sizeof(evt), NULL);
}
9913 
/* Complete the pending user pairing response command identified by
 * @opcode with the given HCI status.
 *
 * Returns 0 on success or -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *pending = pending_find(opcode, hdev);

	if (!pending)
		return -ENOENT;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);

	return 0;
}
9929 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9936 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9944 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9951 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9959 
/* Report the passkey (and keypress progress) being displayed for the
 * remote device to userspace.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify evt;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	evt.entered = entered;
	evt.passkey = __cpu_to_le32(passkey);
	evt.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&evt.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &evt, sizeof(evt),
			  NULL);
}
9975 
/* Emit an Auth Failed event for @conn and, if a pairing command is
 * pending for this connection, complete and remove it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command; it receives
	 * the command response below rather than the event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9996 
/* Handle completion of an authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller state, answer any pending
 * Set Link Security commands and emit New Settings when the setting
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail all pending commands; no settings
		 * changed.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * record whether it actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
10023 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)10024 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10025 {
10026 	struct cmd_lookup *match = data;
10027 
10028 	if (match->sk == NULL) {
10029 		match->sk = cmd->sk;
10030 		sock_hold(match->sk);
10031 	}
10032 }
10033 
/* Handle completion of a Class of Device update: on success broadcast
 * Class Of Dev Changed (skipping the socket that triggered it) and
 * refresh extended controller info.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the CoD change;
	 * pick up the originating socket so it can be skipped below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* sk_lookup took a reference on the matched socket */
	if (match.sk)
		sock_put(match.sk);
}
10052 
/* Handle completion of a local name update: store the name and emit
 * Local Name Changed, unless the update is part of powering on the
 * controller (in which case no events are sent).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace-initiated name change pending, so this
		 * came from the controller itself; cache it.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10080 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10081 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10082 {
10083 	int i;
10084 
10085 	for (i = 0; i < uuid_count; i++) {
10086 		if (!memcmp(uuid, uuids[i], 16))
10087 			return true;
10088 	}
10089 
10090 	return false;
10091 }
10092 
/* Scan an EIR/advertising data blob for any service UUID contained in
 * @uuids. 16- and 32-bit UUIDs are expanded to 128-bit form using the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field type; payload starts at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* UUIDs are little-endian on the wire; the
				 * 16-bit value occupies bytes 12-13 of the
				 * 128-bit form.
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 32-bit value occupies bytes 12-15 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10147 
/* Queue a delayed LE scan restart while the controller is scanning,
 * unless the restart would only fire after the current scan window has
 * already elapsed.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would land past the end of the
	 * scheduled scan duration.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10162 
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true when the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10207 
/* Notify userspace that a device previously matched by the given
 * Advertisement Monitor is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost evt;

	bacpy(&evt.addr.bdaddr, bdaddr);
	evt.addr.type = addr_type;
	evt.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &evt, sizeof(evt),
		   NULL);
}
10220 
/* Emit an Adv Monitor Device Found event built from an existing
 * DEVICE_FOUND skb: the matched monitor handle followed by a copy of
 * the DEVICE_FOUND payload. The original @skb is left untouched.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size delta between the two event structs plus the payload
	 * carried by the DEVICE_FOUND skb.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10250 
/* Route a DEVICE_FOUND skb to the appropriate event(s): plain
 * DEVICE_FOUND for kernel-initiated discovery or passive scanning,
 * and/or ADV_MONITOR_DEVICE_FOUND for matched Advertisement Monitors.
 * Always consumes @skb (either forwards or frees it).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while walking the monitored-device list */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Only the first report per monitored device is
			 * forwarded with the monitor handle.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Forward or drop the original skb; it is consumed either way */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10314 
/* Forward an LE advertisement to mesh listeners as a Mesh Device Found
 * event, but only if it carries at least one of the configured mesh AD
 * types (or if no AD-type filter is configured at all).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An empty filter list means every advertisement is accepted */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Walk AD structures: eir[i] is the length, eir[i+1] the type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Zero terminates the configured type list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	/* Same AD-type scan over the scan response data */
	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No matching AD type: silently drop */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10380 
/* Build and route a Device Found event for a discovered device, after
 * applying discovery state checks, service-discovery filtering and the
 * limited-discoverable filter.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh gets its own copy of every LE advertisement */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the caller supplied
	 * one and the EIR data does not already carry it.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off ownership of skb */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10472 
/* Report the outcome of a remote name request as a Device Found event:
 * on success the resolved name is appended as an EIR Complete Name
 * field, otherwise the NAME_REQUEST_FAILED flag is set.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() may fail; without this check skb_put() below
	 * would dereference a NULL pointer. Matches the handling in
	 * mesh_device_found() and mgmt_device_found().
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10499 
/* Notify userspace that discovery of the current type started or
 * stopped.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering evt;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&evt, 0, sizeof(evt));
	evt.discovering = discovering;
	evt.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &evt, sizeof(evt), NULL);
}
10512 
/* Notify userspace that the controller is entering the given suspend
 * state.
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend evt;

	evt.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &evt, sizeof(evt), NULL);
}
10520 
/* Notify userspace that the controller resumed, optionally including
 * the address of the device that triggered the wake-up.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume evt;

	evt.wake_reason = reason;
	if (!bdaddr) {
		/* No wake device: report an all-zero address */
		memset(&evt.addr, 0, sizeof(evt.addr));
	} else {
		bacpy(&evt.addr.bdaddr, bdaddr);
		evt.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &evt, sizeof(evt), NULL);
}
10536 
/* Registration record for the HCI control channel: dispatches incoming
 * MGMT commands through the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10543 
/* Register the management control channel; called at module init. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10548 
/* Unregister the management control channel; called at module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10553 
/* Complete (as cancelled) every pending mesh transmission owned by @sk
 * on all registered controllers; called when a mgmt socket goes away.
 */
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Drain this socket's mesh TX queue one entry at a time */
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
10572