1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 #include "msft.h"
33 
34 #define HCI_REQ_DONE	  0
35 #define HCI_REQ_PEND	  1
36 #define HCI_REQ_CANCELED  2
37 
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 {
40 	skb_queue_head_init(&req->cmd_q);
41 	req->hdev = hdev;
42 	req->err = 0;
43 }
44 
45 void hci_req_purge(struct hci_request *req)
46 {
47 	skb_queue_purge(&req->cmd_q);
48 }
49 
50 bool hci_req_status_pend(struct hci_dev *hdev)
51 {
52 	return hdev->req_status == HCI_REQ_PEND;
53 }
54 
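/* Attach the completion callback to the last queued command, splice the
 * request's command queue onto hdev->cmd_q and kick cmd_work so the
 * commands get sent to the controller.
 */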
55 static int req_run(struct hci_request *req, hci_req_complete_t complete,
56 		   hci_req_complete_skb_t complete_skb)
57 {
58 	struct hci_dev *hdev = req->hdev;
59 	struct sk_buff *skb;
60 	unsigned long flags;
61 
62 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
63 
64 	/* If an error occurred during request building, remove all HCI
65 	 * commands queued on the HCI request queue.
66 	 */
67 	if (req->err) {
68 		skb_queue_purge(&req->cmd_q);
69 		return req->err;
70 	}
71 
72 	/* Do not allow empty requests */
73 	if (skb_queue_empty(&req->cmd_q))
74 		return -ENODATA;
75 
76 	skb = skb_peek_tail(&req->cmd_q);
77 	if (complete) {
78 		bt_cb(skb)->hci.req_complete = complete;
79 	} else if (complete_skb) {
80 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
81 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
82 	}
83 
84 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
85 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
86 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 
88 	queue_work(hdev->workqueue, &hdev->cmd_work);
89 
90 	return 0;
91 }
92 
93 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 {
95 	return req_run(req, complete, NULL);
96 }
97 
98 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 {
100 	return req_run(req, NULL, complete);
101 }
102 
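/* Completion callback used by the synchronous request helpers: store the
 * status and (optionally) the response skb, then wake up the waiter.
 */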
103 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
104 				  struct sk_buff *skb)
105 {
106 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
107 
108 	if (hdev->req_status == HCI_REQ_PEND) {
109 		hdev->req_result = result;
110 		hdev->req_status = HCI_REQ_DONE;
111 		if (skb)
112 			hdev->req_skb = skb_get(skb);
113 		wake_up_interruptible(&hdev->req_wait_q);
114 	}
115 }
116 
117 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 {
119 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
120 
121 	if (hdev->req_status == HCI_REQ_PEND) {
122 		hdev->req_result = err;
123 		hdev->req_status = HCI_REQ_CANCELED;
124 		wake_up_interruptible(&hdev->req_wait_q);
125 	}
126 }
127 
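/* Send a single HCI command and block (interruptibly, up to 'timeout')
 * until the matching event arrives. Returns the response skb on success
 * or an ERR_PTR on failure.
 */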
128 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
129 				  const void *param, u8 event, u32 timeout)
130 {
131 	struct hci_request req;
132 	struct sk_buff *skb;
133 	int err = 0;
134 
135 	bt_dev_dbg(hdev, "");
136 
137 	hci_req_init(&req, hdev);
138 
139 	hci_req_add_ev(&req, opcode, plen, param, event);
140 
141 	hdev->req_status = HCI_REQ_PEND;
142 
143 	err = hci_req_run_skb(&req, hci_req_sync_complete);
144 	if (err < 0)
145 		return ERR_PTR(err);
146 
147 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
148 			hdev->req_status != HCI_REQ_PEND, timeout);
149 
150 	if (err == -ERESTARTSYS)
151 		return ERR_PTR(-EINTR);
152 
153 	switch (hdev->req_status) {
154 	case HCI_REQ_DONE:
155 		err = -bt_to_errno(hdev->req_result);
156 		break;
157 
158 	case HCI_REQ_CANCELED:
159 		err = -hdev->req_result;
160 		break;
161 
162 	default:
163 		err = -ETIMEDOUT;
164 		break;
165 	}
166 
167 	hdev->req_status = hdev->req_result = 0;
168 	skb = hdev->req_skb;
169 	hdev->req_skb = NULL;
170 
171 	bt_dev_dbg(hdev, "end: err %d", err);
172 
173 	if (err < 0) {
174 		kfree_skb(skb);
175 		return ERR_PTR(err);
176 	}
177 
178 	if (!skb)
179 		return ERR_PTR(-ENODATA);
180 
181 	return skb;
182 }
183 EXPORT_SYMBOL(__hci_cmd_sync_ev);
184 
185 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
186 			       const void *param, u32 timeout)
187 {
188 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 }
190 EXPORT_SYMBOL(__hci_cmd_sync);
191 
192 /* Execute request and wait for completion. */
193 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 						     unsigned long opt),
195 		   unsigned long opt, u32 timeout, u8 *hci_status)
196 {
197 	struct hci_request req;
198 	int err = 0;
199 
200 	bt_dev_dbg(hdev, "start");
201 
202 	hci_req_init(&req, hdev);
203 
204 	hdev->req_status = HCI_REQ_PEND;
205 
206 	err = func(&req, opt);
207 	if (err) {
208 		if (hci_status)
209 			*hci_status = HCI_ERROR_UNSPECIFIED;
210 		return err;
211 	}
212 
213 	err = hci_req_run_skb(&req, hci_req_sync_complete);
214 	if (err < 0) {
215 		hdev->req_status = 0;
216 
217 		/* ENODATA means the HCI request command queue is empty.
218 		 * This can happen when a request with conditionals doesn't
219 		 * trigger any commands to be sent. This is normal behavior
220 		 * and should not trigger an error return.
221 		 */
222 		if (err == -ENODATA) {
223 			if (hci_status)
224 				*hci_status = 0;
225 			return 0;
226 		}
227 
228 		if (hci_status)
229 			*hci_status = HCI_ERROR_UNSPECIFIED;
230 
231 		return err;
232 	}
233 
234 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
235 			hdev->req_status != HCI_REQ_PEND, timeout);
236 
237 	if (err == -ERESTARTSYS)
238 		return -EINTR;
239 
240 	switch (hdev->req_status) {
241 	case HCI_REQ_DONE:
242 		err = -bt_to_errno(hdev->req_result);
243 		if (hci_status)
244 			*hci_status = hdev->req_result;
245 		break;
246 
247 	case HCI_REQ_CANCELED:
248 		err = -hdev->req_result;
249 		if (hci_status)
250 			*hci_status = HCI_ERROR_UNSPECIFIED;
251 		break;
252 
253 	default:
254 		err = -ETIMEDOUT;
255 		if (hci_status)
256 			*hci_status = HCI_ERROR_UNSPECIFIED;
257 		break;
258 	}
259 
260 	kfree_skb(hdev->req_skb);
261 	hdev->req_skb = NULL;
262 	hdev->req_status = hdev->req_result = 0;
263 
264 	bt_dev_dbg(hdev, "end: err %d", err);
265 
266 	return err;
267 }
268 
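/* Serialized wrapper around __hci_req_sync(): take the request lock and only
 * execute the request if the controller is still up.
 */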
269 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 						  unsigned long opt),
271 		 unsigned long opt, u32 timeout, u8 *hci_status)
272 {
273 	int ret;
274 
275 	/* Serialize all requests */
276 	hci_req_sync_lock(hdev);
277 	/* Check the state after obtaining the lock to protect the HCI_UP
278 	 * against any races from hci_dev_do_close when the controller
279 	 * gets removed.
280 	 */
281 	if (test_bit(HCI_UP, &hdev->flags))
282 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
283 	else
284 		ret = -ENETDOWN;
285 	hci_req_sync_unlock(hdev);
286 
287 	return ret;
288 }
289 
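/* Allocate an skb and fill in the HCI command header and parameters */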
290 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
291 				const void *param)
292 {
293 	int len = HCI_COMMAND_HDR_SIZE + plen;
294 	struct hci_command_hdr *hdr;
295 	struct sk_buff *skb;
296 
297 	skb = bt_skb_alloc(len, GFP_ATOMIC);
298 	if (!skb)
299 		return NULL;
300 
301 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
302 	hdr->opcode = cpu_to_le16(opcode);
303 	hdr->plen   = plen;
304 
305 	if (plen)
306 		skb_put_data(skb, param, plen);
307 
308 	bt_dev_dbg(hdev, "skb len %d", skb->len);
309 
310 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
311 	hci_skb_opcode(skb) = opcode;
312 
313 	return skb;
314 }
315 
316 /* Queue a command to an asynchronous HCI request */
317 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
318 		    const void *param, u8 event)
319 {
320 	struct hci_dev *hdev = req->hdev;
321 	struct sk_buff *skb;
322 
323 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
324 
325 	/* If an error occurred during request building, there is no point in
326 	 * queueing the HCI command. We can simply return.
327 	 */
328 	if (req->err)
329 		return;
330 
331 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
332 	if (!skb) {
333 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
334 			   opcode);
335 		req->err = -ENOMEM;
336 		return;
337 	}
338 
339 	if (skb_queue_empty(&req->cmd_q))
340 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
341 
342 	bt_cb(skb)->hci.req_event = event;
343 
344 	skb_queue_tail(&req->cmd_q, skb);
345 }
346 
347 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
348 		 const void *param)
349 {
350 	hci_req_add_ev(req, opcode, plen, param, 0);
351 }
352 
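/* Queue Write Page Scan Activity/Type commands: interlaced scanning with a
 * 160 msec interval when fast connectable is enabled, the hdev defaults
 * otherwise. Commands are only queued if the values actually change.
 */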
353 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
354 {
355 	struct hci_dev *hdev = req->hdev;
356 	struct hci_cp_write_page_scan_activity acp;
357 	u8 type;
358 
359 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
360 		return;
361 
362 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
363 		return;
364 
365 	if (enable) {
366 		type = PAGE_SCAN_TYPE_INTERLACED;
367 
368 		/* 160 msec page scan interval */
369 		acp.interval = cpu_to_le16(0x0100);
370 	} else {
371 		type = hdev->def_page_scan_type;
372 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
373 	}
374 
375 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
376 
377 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
378 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
379 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
380 			    sizeof(acp), &acp);
381 
382 	if (hdev->page_scan_type != type)
383 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
384 }
385 
386 static void start_interleave_scan(struct hci_dev *hdev)
387 {
388 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
389 	queue_delayed_work(hdev->req_workqueue,
390 			   &hdev->interleave_scan, 0);
391 }
392 
393 static bool is_interleave_scanning(struct hci_dev *hdev)
394 {
395 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
396 }
397 
398 static void cancel_interleave_scan(struct hci_dev *hdev)
399 {
400 	bt_dev_dbg(hdev, "cancelling interleave scan");
401 
402 	cancel_delayed_work_sync(&hdev->interleave_scan);
403 
404 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
405 }
406 
407 /* Return true if this function started the interleave scan (i.e. it was not
408  * already running), otherwise return false.
409  */
410 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
411 {
412 	/* Do interleaved scan only if all of the following are true:
413 	 * - There is at least one ADV monitor
414 	 * - At least one pending LE connection or one device to be scanned for
415 	 * - Monitor offloading is not supported
416 	 * If so, we should alternate between allowlist scan and one without
417 	 * any filters to save power.
418 	 */
419 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
420 				!(list_empty(&hdev->pend_le_conns) &&
421 				  list_empty(&hdev->pend_le_reports)) &&
422 				hci_get_adv_monitor_offload_ext(hdev) ==
423 				    HCI_ADV_MONITOR_EXT_NONE;
424 	bool is_interleaving = is_interleave_scanning(hdev);
425 
426 	if (use_interleaving && !is_interleaving) {
427 		start_interleave_scan(hdev);
428 		bt_dev_dbg(hdev, "starting interleave scan");
429 		return true;
430 	}
431 
432 	if (!use_interleaving && is_interleaving)
433 		cancel_interleave_scan(hdev);
434 
435 	return false;
436 }
437 
438 /* This function controls the background scanning based on hdev->pend_le_conns
439  * list. If there are pending LE connections we start the background scanning,
440  * otherwise we stop it.
441  *
442  * This function requires the caller holds hdev->lock.
443  */
444 static void __hci_update_background_scan(struct hci_request *req)
445 {
446 	struct hci_dev *hdev = req->hdev;
447 
448 	if (!test_bit(HCI_UP, &hdev->flags) ||
449 	    test_bit(HCI_INIT, &hdev->flags) ||
450 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
451 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
452 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
453 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
454 		return;
455 
456 	/* No point in doing scanning if LE support hasn't been enabled */
457 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
458 		return;
459 
460 	/* If discovery is active don't interfere with it */
461 	if (hdev->discovery.state != DISCOVERY_STOPPED)
462 		return;
463 
464 	/* Reset RSSI and UUID filters when starting background scanning
465 	 * since these filters are meant for service discovery only.
466 	 *
467 	 * The Start Discovery and Start Service Discovery operations
468 	 * ensure to set proper values for RSSI threshold and UUID
469 	 * filter list. So it is safe to just reset them here.
470 	 */
471 	hci_discovery_filter_clear(hdev);
472 
473 	bt_dev_dbg(hdev, "ADV monitoring is %s",
474 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
475 
476 	if (list_empty(&hdev->pend_le_conns) &&
477 	    list_empty(&hdev->pend_le_reports) &&
478 	    !hci_is_adv_monitoring(hdev)) {
479 		/* If there is no pending LE connections or devices
480 		 * to be scanned for or no ADV monitors, we should stop the
481 		 * background scanning.
482 		 */
483 
484 		/* If controller is not scanning we are done. */
485 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
486 			return;
487 
488 		hci_req_add_le_scan_disable(req, false);
489 
490 		bt_dev_dbg(hdev, "stopping background scanning");
491 	} else {
492 		/* If there is at least one pending LE connection, we should
493 		 * keep the background scan running.
494 		 */
495 
496 		/* If controller is connecting, we should not start scanning
497 		 * since some controllers are not able to scan and connect at
498 		 * the same time.
499 		 */
500 		if (hci_lookup_le_connect(hdev))
501 			return;
502 
503 		/* If controller is currently scanning, we stop it to ensure we
504 		 * don't miss any advertising (due to duplicates filter).
505 		 */
506 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
507 			hci_req_add_le_scan_disable(req, false);
508 
509 		hci_req_add_le_passive_scan(req);
510 		bt_dev_dbg(hdev, "starting background scanning");
511 	}
512 }
513 
514 void __hci_req_update_name(struct hci_request *req)
515 {
516 	struct hci_dev *hdev = req->hdev;
517 	struct hci_cp_write_local_name cp;
518 
519 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
520 
521 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
522 }
523 
524 #define PNP_INFO_SVCLASS_ID		0x1200
525 
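/* Append an EIR field listing the registered 16-bit service UUIDs,
 * downgrading it to the "incomplete list" type when they do not all fit.
 * The 32-bit and 128-bit helpers below follow the same pattern.
 */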
526 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
527 {
528 	u8 *ptr = data, *uuids_start = NULL;
529 	struct bt_uuid *uuid;
530 
531 	if (len < 4)
532 		return ptr;
533 
534 	list_for_each_entry(uuid, &hdev->uuids, list) {
535 		u16 uuid16;
536 
537 		if (uuid->size != 16)
538 			continue;
539 
540 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
541 		if (uuid16 < 0x1100)
542 			continue;
543 
544 		if (uuid16 == PNP_INFO_SVCLASS_ID)
545 			continue;
546 
547 		if (!uuids_start) {
548 			uuids_start = ptr;
549 			uuids_start[0] = 1;
550 			uuids_start[1] = EIR_UUID16_ALL;
551 			ptr += 2;
552 		}
553 
554 		/* Stop if not enough space to put next UUID */
555 		if ((ptr - data) + sizeof(u16) > len) {
556 			uuids_start[1] = EIR_UUID16_SOME;
557 			break;
558 		}
559 
560 		*ptr++ = (uuid16 & 0x00ff);
561 		*ptr++ = (uuid16 & 0xff00) >> 8;
562 		uuids_start[0] += sizeof(uuid16);
563 	}
564 
565 	return ptr;
566 }
567 
568 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
569 {
570 	u8 *ptr = data, *uuids_start = NULL;
571 	struct bt_uuid *uuid;
572 
573 	if (len < 6)
574 		return ptr;
575 
576 	list_for_each_entry(uuid, &hdev->uuids, list) {
577 		if (uuid->size != 32)
578 			continue;
579 
580 		if (!uuids_start) {
581 			uuids_start = ptr;
582 			uuids_start[0] = 1;
583 			uuids_start[1] = EIR_UUID32_ALL;
584 			ptr += 2;
585 		}
586 
587 		/* Stop if not enough space to put next UUID */
588 		if ((ptr - data) + sizeof(u32) > len) {
589 			uuids_start[1] = EIR_UUID32_SOME;
590 			break;
591 		}
592 
593 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
594 		ptr += sizeof(u32);
595 		uuids_start[0] += sizeof(u32);
596 	}
597 
598 	return ptr;
599 }
600 
601 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
602 {
603 	u8 *ptr = data, *uuids_start = NULL;
604 	struct bt_uuid *uuid;
605 
606 	if (len < 18)
607 		return ptr;
608 
609 	list_for_each_entry(uuid, &hdev->uuids, list) {
610 		if (uuid->size != 128)
611 			continue;
612 
613 		if (!uuids_start) {
614 			uuids_start = ptr;
615 			uuids_start[0] = 1;
616 			uuids_start[1] = EIR_UUID128_ALL;
617 			ptr += 2;
618 		}
619 
620 		/* Stop if not enough space to put next UUID */
621 		if ((ptr - data) + 16 > len) {
622 			uuids_start[1] = EIR_UUID128_SOME;
623 			break;
624 		}
625 
626 		memcpy(ptr, uuid->uuid, 16);
627 		ptr += 16;
628 		uuids_start[0] += 16;
629 	}
630 
631 	return ptr;
632 }
633 
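/* Build the extended inquiry response payload: local name, TX power,
 * Device ID and the 16/32/128-bit service UUID lists.
 */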
634 static void create_eir(struct hci_dev *hdev, u8 *data)
635 {
636 	u8 *ptr = data;
637 	size_t name_len;
638 
639 	name_len = strlen(hdev->dev_name);
640 
641 	if (name_len > 0) {
642 		/* EIR Data type */
643 		if (name_len > 48) {
644 			name_len = 48;
645 			ptr[1] = EIR_NAME_SHORT;
646 		} else
647 			ptr[1] = EIR_NAME_COMPLETE;
648 
649 		/* EIR Data length */
650 		ptr[0] = name_len + 1;
651 
652 		memcpy(ptr + 2, hdev->dev_name, name_len);
653 
654 		ptr += (name_len + 2);
655 	}
656 
657 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
658 		ptr[0] = 2;
659 		ptr[1] = EIR_TX_POWER;
660 		ptr[2] = (u8) hdev->inq_tx_power;
661 
662 		ptr += 3;
663 	}
664 
665 	if (hdev->devid_source > 0) {
666 		ptr[0] = 9;
667 		ptr[1] = EIR_DEVICE_ID;
668 
669 		put_unaligned_le16(hdev->devid_source, ptr + 2);
670 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
671 		put_unaligned_le16(hdev->devid_product, ptr + 6);
672 		put_unaligned_le16(hdev->devid_version, ptr + 8);
673 
674 		ptr += 10;
675 	}
676 
677 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
678 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
679 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
680 }
681 
682 void __hci_req_update_eir(struct hci_request *req)
683 {
684 	struct hci_dev *hdev = req->hdev;
685 	struct hci_cp_write_eir cp;
686 
687 	if (!hdev_is_powered(hdev))
688 		return;
689 
690 	if (!lmp_ext_inq_capable(hdev))
691 		return;
692 
693 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
694 		return;
695 
696 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
697 		return;
698 
699 	memset(&cp, 0, sizeof(cp));
700 
701 	create_eir(hdev, cp.data);
702 
703 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
704 		return;
705 
706 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
707 
708 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
709 }
710 
711 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
712 {
713 	struct hci_dev *hdev = req->hdev;
714 
715 	if (hdev->scanning_paused) {
716 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
717 		return;
718 	}
719 
720 	if (hdev->suspended)
721 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
722 
723 	if (use_ext_scan(hdev)) {
724 		struct hci_cp_le_set_ext_scan_enable cp;
725 
726 		memset(&cp, 0, sizeof(cp));
727 		cp.enable = LE_SCAN_DISABLE;
728 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
729 			    &cp);
730 	} else {
731 		struct hci_cp_le_set_scan_enable cp;
732 
733 		memset(&cp, 0, sizeof(cp));
734 		cp.enable = LE_SCAN_DISABLE;
735 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
736 	}
737 
738 	/* Disable address resolution */
739 	if (use_ll_privacy(hdev) &&
740 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
741 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
742 		__u8 enable = 0x00;
743 
744 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
745 	}
746 }
747 
748 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
749 				 u8 bdaddr_type)
750 {
751 	struct hci_cp_le_del_from_accept_list cp;
752 
753 	cp.bdaddr_type = bdaddr_type;
754 	bacpy(&cp.bdaddr, bdaddr);
755 
756 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
757 		   cp.bdaddr_type);
758 	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
759 
760 	if (use_ll_privacy(req->hdev) &&
761 	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
762 		struct smp_irk *irk;
763 
764 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
765 		if (irk) {
766 			struct hci_cp_le_del_from_resolv_list cp;
767 
768 			cp.bdaddr_type = bdaddr_type;
769 			bacpy(&cp.bdaddr, bdaddr);
770 
771 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
772 				    sizeof(cp), &cp);
773 		}
774 	}
775 }
776 
777 /* Adds connection to accept list if needed. On error, returns -1. */
778 static int add_to_accept_list(struct hci_request *req,
779 			      struct hci_conn_params *params, u8 *num_entries,
780 			      bool allow_rpa)
781 {
782 	struct hci_cp_le_add_to_accept_list cp;
783 	struct hci_dev *hdev = req->hdev;
784 
785 	/* Already in accept list */
786 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
787 				   params->addr_type))
788 		return 0;
789 
790 	/* Select filter policy to accept all advertising */
791 	if (*num_entries >= hdev->le_accept_list_size)
792 		return -1;
793 
794 	/* Accept list can not be used with RPAs */
795 	if (!allow_rpa &&
796 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
797 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
798 		return -1;
799 	}
800 
801 	/* During suspend, only wakeable devices can be in accept list */
802 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
803 						   params->current_flags))
804 		return 0;
805 
806 	*num_entries += 1;
807 	cp.bdaddr_type = params->addr_type;
808 	bacpy(&cp.bdaddr, &params->addr);
809 
810 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
811 		   cp.bdaddr_type);
812 	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
813 
814 	if (use_ll_privacy(hdev) &&
815 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
816 		struct smp_irk *irk;
817 
818 		irk = hci_find_irk_by_addr(hdev, &params->addr,
819 					   params->addr_type);
820 		if (irk) {
821 			struct hci_cp_le_add_to_resolv_list cp;
822 
823 			cp.bdaddr_type = params->addr_type;
824 			bacpy(&cp.bdaddr, &params->addr);
825 			memcpy(cp.peer_irk, irk->val, 16);
826 
827 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
828 				memcpy(cp.local_irk, hdev->irk, 16);
829 			else
830 				memset(cp.local_irk, 0, 16);
831 
832 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
833 				    sizeof(cp), &cp);
834 		}
835 	}
836 
837 	return 0;
838 }
839 
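/* Synchronize the controller's LE accept list (and, with LL privacy, the
 * resolving list) with the pending connection and report lists. Returns the
 * scan filter policy to use: 0x01 if the accept list can be used, 0x00
 * otherwise.
 */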
840 static u8 update_accept_list(struct hci_request *req)
841 {
842 	struct hci_dev *hdev = req->hdev;
843 	struct hci_conn_params *params;
844 	struct bdaddr_list *b;
845 	u8 num_entries = 0;
846 	bool pend_conn, pend_report;
847 	/* We allow usage of accept list even with RPAs in suspend. In the worst
848 	 * case, we won't be able to wake from devices that use the privacy 1.2
849 	 * features. Additionally, once we support privacy 1.2 and IRK
850 	 * offloading, we can update this to also check for those conditions.
851 	 */
852 	bool allow_rpa = hdev->suspended;
853 
854 	if (use_ll_privacy(hdev) &&
855 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
856 		allow_rpa = true;
857 
858 	/* Go through the current accept list programmed into the
859 	 * controller one by one and check if that address is still
860 	 * in the list of pending connections or list of devices to
861 	 * report. If not present in either list, then queue the
862 	 * command to remove it from the controller.
863 	 */
864 	list_for_each_entry(b, &hdev->le_accept_list, list) {
865 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
866 						      &b->bdaddr,
867 						      b->bdaddr_type);
868 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
869 							&b->bdaddr,
870 							b->bdaddr_type);
871 
872 		/* If the device is not likely to connect or report,
873 		 * remove it from the accept list.
874 		 */
875 		if (!pend_conn && !pend_report) {
876 			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
877 			continue;
878 		}
879 
880 		/* Accept list can not be used with RPAs */
881 		if (!allow_rpa &&
882 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
883 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
884 			return 0x00;
885 		}
886 
887 		num_entries++;
888 	}
889 
890 	/* Now that all stale accept list entries have been removed,
891 	 * walk through the list of pending connections
892 	 * and ensure that any new device gets programmed into
893 	 * the controller.
894 	 *
895 	 * If the list of the devices is larger than the list of
896 	 * available accept list entries in the controller, then
897 	 * just abort and return a filter policy value that does not use the
898 	 * accept list.
899 	 */
900 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
901 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
902 			return 0x00;
903 	}
904 
905 	/* After adding all new pending connections, walk through
906 	 * the list of pending reports and also add these to the
907 	 * accept list if there is still space. Abort if space runs out.
908 	 */
909 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
910 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
911 			return 0x00;
912 	}
913 
914 	/* Use the allowlist unless the following conditions are all true:
915 	 * - We are not currently suspending
916 	 * - There are 1 or more ADV monitors registered and it's not offloaded
917 	 * - Interleaved scanning is not currently using the allowlist
918 	 */
919 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
920 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
921 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
922 		return 0x00;
923 
924 	/* Select filter policy to use accept list */
925 	return 0x01;
926 }
927 
928 static bool scan_use_rpa(struct hci_dev *hdev)
929 {
930 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
931 }
932 
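/* Queue the commands to start LE scanning, optionally enabling controller
 * based address resolution first. The extended scan parameters/enable
 * commands are used when the controller supports them, the legacy commands
 * otherwise.
 */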
933 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
934 			       u16 window, u8 own_addr_type, u8 filter_policy,
935 			       bool filter_dup, bool addr_resolv)
936 {
937 	struct hci_dev *hdev = req->hdev;
938 
939 	if (hdev->scanning_paused) {
940 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
941 		return;
942 	}
943 
944 	if (use_ll_privacy(hdev) &&
945 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
946 	    addr_resolv) {
947 		u8 enable = 0x01;
948 
949 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
950 	}
951 
952 	/* Use extended scanning if both the extended scan parameters and
953 	 * extended scan enable commands are supported
954 	 */
955 	if (use_ext_scan(hdev)) {
956 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
957 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
958 		struct hci_cp_le_scan_phy_params *phy_params;
959 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
960 		u32 plen;
961 
962 		ext_param_cp = (void *)data;
963 		phy_params = (void *)ext_param_cp->data;
964 
965 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
966 		ext_param_cp->own_addr_type = own_addr_type;
967 		ext_param_cp->filter_policy = filter_policy;
968 
969 		plen = sizeof(*ext_param_cp);
970 
971 		if (scan_1m(hdev) || scan_2m(hdev)) {
972 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
973 
974 			memset(phy_params, 0, sizeof(*phy_params));
975 			phy_params->type = type;
976 			phy_params->interval = cpu_to_le16(interval);
977 			phy_params->window = cpu_to_le16(window);
978 
979 			plen += sizeof(*phy_params);
980 			phy_params++;
981 		}
982 
983 		if (scan_coded(hdev)) {
984 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
985 
986 			memset(phy_params, 0, sizeof(*phy_params));
987 			phy_params->type = type;
988 			phy_params->interval = cpu_to_le16(interval);
989 			phy_params->window = cpu_to_le16(window);
990 
991 			plen += sizeof(*phy_params);
992 			phy_params++;
993 		}
994 
995 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
996 			    plen, ext_param_cp);
997 
998 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
999 		ext_enable_cp.enable = LE_SCAN_ENABLE;
1000 		ext_enable_cp.filter_dup = filter_dup;
1001 
1002 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1003 			    sizeof(ext_enable_cp), &ext_enable_cp);
1004 	} else {
1005 		struct hci_cp_le_set_scan_param param_cp;
1006 		struct hci_cp_le_set_scan_enable enable_cp;
1007 
1008 		memset(&param_cp, 0, sizeof(param_cp));
1009 		param_cp.type = type;
1010 		param_cp.interval = cpu_to_le16(interval);
1011 		param_cp.window = cpu_to_le16(window);
1012 		param_cp.own_address_type = own_addr_type;
1013 		param_cp.filter_policy = filter_policy;
1014 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1015 			    &param_cp);
1016 
1017 		memset(&enable_cp, 0, sizeof(enable_cp));
1018 		enable_cp.enable = LE_SCAN_ENABLE;
1019 		enable_cp.filter_dup = filter_dup;
1020 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1021 			    &enable_cp);
1022 	}
1023 }
1024 
1025 /* Returns true if an LE connection is in the scanning state */
1026 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1027 {
1028 	struct hci_conn_hash *h = &hdev->conn_hash;
1029 	struct hci_conn  *c;
1030 
1031 	rcu_read_lock();
1032 
1033 	list_for_each_entry_rcu(c, &h->list, list) {
1034 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1035 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1036 			rcu_read_unlock();
1037 			return true;
1038 		}
1039 	}
1040 
1041 	rcu_read_unlock();
1042 
1043 	return false;
1044 }
1045 
1046 /* hci_req_add_le_scan_disable() must be called first to disable
1047  * controller-based address resolution before the resolving list can be
1048  * reconfigured.
1049  */
1050 void hci_req_add_le_passive_scan(struct hci_request *req)
1051 {
1052 	struct hci_dev *hdev = req->hdev;
1053 	u8 own_addr_type;
1054 	u8 filter_policy;
1055 	u16 window, interval;
1056 	/* Default is to enable duplicates filter */
1057 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1058 	/* Background scanning should run with address resolution */
1059 	bool addr_resolv = true;
1060 
1061 	if (hdev->scanning_paused) {
1062 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1063 		return;
1064 	}
1065 
1066 	/* Set require_privacy to false since no SCAN_REQ are sent
1067 	 * during passive scanning. Not using a non-resolvable address
1068 	 * here is important so that peer devices using direct
1069 	 * advertising with our address will be correctly reported
1070 	 * by the controller.
1071 	 */
1072 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1073 				      &own_addr_type))
1074 		return;
1075 
1076 	if (hdev->enable_advmon_interleave_scan &&
1077 	    __hci_update_interleaved_scan(hdev))
1078 		return;
1079 
1080 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1081 	/* Adding or removing entries from the accept list must
1082 	 * happen before enabling scanning. The controller does
1083 	 * not allow accept list modification while scanning.
1084 	 */
1085 	filter_policy = update_accept_list(req);
1086 
1087 	/* When the controller is using resolvable random addresses (i.e. LE
1088 	 * privacy is enabled), controllers with Extended Scanner Filter
1089 	 * Policies support can additionally handle directed advertising.
1091 	 *
1092 	 * So instead of using filter policies 0x00 (no accept list)
1093 	 * and 0x01 (accept list enabled) use the new filter policies
1094 	 * 0x02 (no accept list) and 0x03 (accept list enabled).
1095 	 */
1096 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1097 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1098 		filter_policy |= 0x02;
1099 
1100 	if (hdev->suspended) {
1101 		window = hdev->le_scan_window_suspend;
1102 		interval = hdev->le_scan_int_suspend;
1103 
1104 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1105 	} else if (hci_is_le_conn_scanning(hdev)) {
1106 		window = hdev->le_scan_window_connect;
1107 		interval = hdev->le_scan_int_connect;
1108 	} else if (hci_is_adv_monitoring(hdev)) {
1109 		window = hdev->le_scan_window_adv_monitor;
1110 		interval = hdev->le_scan_int_adv_monitor;
1111 
1112 		/* Disable duplicates filter when scanning for advertisement
1113 		 * monitor for the following reasons.
1114 		 *
1115 		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
1116 		 * controllers ignore RSSI_Sampling_Period when the duplicates
1117 		 * filter is enabled.
1118 		 *
1119 		 * For SW pattern filtering, when we're not doing interleaved
1120 		 * scanning, it is necessary to disable duplicates filter,
1121 		 * otherwise hosts can only receive one advertisement and it's
1122 		 * impossible to know if a peer is still in range.
1123 		 */
1124 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
1125 	} else {
1126 		window = hdev->le_scan_window;
1127 		interval = hdev->le_scan_interval;
1128 	}
1129 
1130 	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1131 		   filter_policy);
1132 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1133 			   own_addr_type, filter_policy, filter_dup,
1134 			   addr_resolv);
1135 }
1136 
1137 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1138 {
1139 	struct adv_info *adv_instance;
1140 
1141 	/* Instance 0x00 always sets the local name */
1142 	if (instance == 0x00)
1143 		return true;
1144 
1145 	adv_instance = hci_find_adv_instance(hdev, instance);
1146 	if (!adv_instance)
1147 		return false;
1148 
1149 	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1150 	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1151 		return true;
1152 
1153 	return adv_instance->scan_rsp_len ? true : false;
1154 }
1155 
1156 static void hci_req_clear_event_filter(struct hci_request *req)
1157 {
1158 	struct hci_cp_set_event_filter f;
1159 
1160 	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1161 		return;
1162 
1163 	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1164 		memset(&f, 0, sizeof(f));
1165 		f.flt_type = HCI_FLT_CLEAR_ALL;
1166 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1167 	}
1168 }
1169 
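/* Program a Connection Setup event filter for every wakeable device on the
 * accept list and enable or disable page scan accordingly. Used on the
 * suspend path to limit which devices may wake the host.
 */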
1170 static void hci_req_set_event_filter(struct hci_request *req)
1171 {
1172 	struct bdaddr_list_with_flags *b;
1173 	struct hci_cp_set_event_filter f;
1174 	struct hci_dev *hdev = req->hdev;
1175 	u8 scan = SCAN_DISABLED;
1176 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1177 
1178 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1179 		return;
1180 
1181 	/* Always clear event filter when starting */
1182 	hci_req_clear_event_filter(req);
1183 
1184 	list_for_each_entry(b, &hdev->accept_list, list) {
1185 		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1186 					b->current_flags))
1187 			continue;
1188 
1189 		memset(&f, 0, sizeof(f));
1190 		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1191 		f.flt_type = HCI_FLT_CONN_SETUP;
1192 		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1193 		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1194 
1195 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1196 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1197 		scan = SCAN_PAGE;
1198 	}
1199 
1200 	if (scan && !scanning) {
1201 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1202 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1203 	} else if (!scan && scanning) {
1204 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1205 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1206 	}
1207 }
1208 
1209 static void cancel_adv_timeout(struct hci_dev *hdev)
1210 {
1211 	if (hdev->adv_instance_timeout) {
1212 		hdev->adv_instance_timeout = 0;
1213 		cancel_delayed_work(&hdev->adv_instance_expire);
1214 	}
1215 }
1216 
1217 /* This function requires the caller holds hdev->lock */
1218 void __hci_req_pause_adv_instances(struct hci_request *req)
1219 {
1220 	bt_dev_dbg(req->hdev, "Pausing advertising instances");
1221 
1222 	/* Call to disable any advertisements active on the controller.
1223 	 * This will succeed even if no advertisements are configured.
1224 	 */
1225 	__hci_req_disable_advertising(req);
1226 
1227 	/* If we are using software rotation, pause the loop */
1228 	if (!ext_adv_capable(req->hdev))
1229 		cancel_adv_timeout(req->hdev);
1230 }
1231 
1232 /* This function requires the caller holds hdev->lock */
1233 static void __hci_req_resume_adv_instances(struct hci_request *req)
1234 {
1235 	struct adv_info *adv;
1236 
1237 	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1238 
1239 	if (ext_adv_capable(req->hdev)) {
1240 		/* Call for each tracked instance to be re-enabled */
1241 		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1242 			__hci_req_enable_ext_advertising(req,
1243 							 adv->instance);
1244 		}
1245 
1246 	} else {
1247 		/* Schedule for most recent instance to be restarted and begin
1248 		 * the software rotation loop
1249 		 */
1250 		__hci_req_schedule_adv_instance(req,
1251 						req->hdev->cur_adv_instance,
1252 						true);
1253 	}
1254 }
1255 
1256 /* This function requires the caller holds hdev->lock */
1257 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1258 {
1259 	struct hci_request req;
1260 
1261 	hci_req_init(&req, hdev);
1262 	__hci_req_resume_adv_instances(&req);
1263 
1264 	return hci_req_run(&req, NULL);
1265 }
1266 
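/* Completion callback for the suspend/resume requests: clear the pending
 * scan and advertisement filter suspend tasks and wake up the waiter.
 */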
1267 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1268 {
1269 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1270 		   status);
1271 	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1272 	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1273 		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1274 		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1275 		wake_up(&hdev->suspend_wait_q);
1276 	}
1277 
1278 	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1279 		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1280 		wake_up(&hdev->suspend_wait_q);
1281 	}
1282 }
1283 
1284 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1285 					      bool enable)
1286 {
1287 	struct hci_dev *hdev = req->hdev;
1288 
1289 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1290 	case HCI_ADV_MONITOR_EXT_MSFT:
1291 		msft_req_add_set_filter_enable(req, enable);
1292 		break;
1293 	default:
1294 		return;
1295 	}
1296 
1297 	/* No need to block when enabling since it's on resume path */
1298 	if (hdev->suspended && !enable)
1299 		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1300 }
1301 
1302 /* Call with hci_dev_lock */
1303 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1304 {
1305 	int old_state;
1306 	struct hci_conn *conn;
1307 	struct hci_request req;
1308 	u8 page_scan;
1309 	int disconnect_counter;
1310 
1311 	if (next == hdev->suspend_state) {
1312 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1313 		goto done;
1314 	}
1315 
1316 	hdev->suspend_state = next;
1317 	hci_req_init(&req, hdev);
1318 
1319 	if (next == BT_SUSPEND_DISCONNECT) {
1320 		/* Mark device as suspended */
1321 		hdev->suspended = true;
1322 
1323 		/* Pause discovery if not already stopped */
1324 		old_state = hdev->discovery.state;
1325 		if (old_state != DISCOVERY_STOPPED) {
1326 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1327 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1328 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1329 		}
1330 
1331 		hdev->discovery_paused = true;
1332 		hdev->discovery_old_state = old_state;
1333 
1334 		/* Stop directed advertising */
1335 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1336 		if (old_state) {
1337 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1338 			cancel_delayed_work(&hdev->discov_off);
1339 			queue_delayed_work(hdev->req_workqueue,
1340 					   &hdev->discov_off, 0);
1341 		}
1342 
1343 		/* Pause other advertisements */
1344 		if (hdev->adv_instance_cnt)
1345 			__hci_req_pause_adv_instances(&req);
1346 
1347 		hdev->advertising_paused = true;
1348 		hdev->advertising_old_state = old_state;
1349 
1350 		/* Disable page scan if enabled */
1351 		if (test_bit(HCI_PSCAN, &hdev->flags)) {
1352 			page_scan = SCAN_DISABLED;
1353 			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1354 				    &page_scan);
1355 			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1356 		}
1357 
1358 		/* Disable LE passive scan if enabled */
1359 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1360 			cancel_interleave_scan(hdev);
1361 			hci_req_add_le_scan_disable(&req, false);
1362 		}
1363 
1364 		/* Disable advertisement filters */
1365 		hci_req_add_set_adv_filter_enable(&req, false);
1366 
1367 		/* Prevent disconnects from causing scanning to be re-enabled */
1368 		hdev->scanning_paused = true;
1369 
1370 		/* Run commands before disconnecting */
1371 		hci_req_run(&req, suspend_req_complete);
1372 
1373 		disconnect_counter = 0;
1374 		/* Soft disconnect everything (power off) */
1375 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1376 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1377 			disconnect_counter++;
1378 		}
1379 
1380 		if (disconnect_counter > 0) {
1381 			bt_dev_dbg(hdev,
1382 				   "Had %d disconnects. Will wait on them",
1383 				   disconnect_counter);
1384 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1385 		}
1386 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1387 		/* Unpause to take care of updating scanning params */
1388 		hdev->scanning_paused = false;
1389 		/* Enable event filter for paired devices */
1390 		hci_req_set_event_filter(&req);
1391 		/* Enable passive scan at lower duty cycle */
1392 		__hci_update_background_scan(&req);
1393 		/* Pause scan changes again. */
1394 		hdev->scanning_paused = true;
1395 		hci_req_run(&req, suspend_req_complete);
1396 	} else {
1397 		hdev->suspended = false;
1398 		hdev->scanning_paused = false;
1399 
1400 		/* Clear any event filters and restore scan state */
1401 		hci_req_clear_event_filter(&req);
1402 		__hci_req_update_scan(&req);
1403 
1404 		/* Reset passive/background scanning to normal */
1405 		__hci_update_background_scan(&req);
1406 		/* Enable all of the advertisement filters */
1407 		hci_req_add_set_adv_filter_enable(&req, true);
1408 
1409 		/* Unpause directed advertising */
1410 		hdev->advertising_paused = false;
1411 		if (hdev->advertising_old_state) {
1412 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1413 				hdev->suspend_tasks);
1414 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1415 			queue_work(hdev->req_workqueue,
1416 				   &hdev->discoverable_update);
1417 			hdev->advertising_old_state = 0;
1418 		}
1419 
1420 		/* Resume other advertisements */
1421 		if (hdev->adv_instance_cnt)
1422 			__hci_req_resume_adv_instances(&req);
1423 
1424 		/* Unpause discovery */
1425 		hdev->discovery_paused = false;
1426 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1427 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1428 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1429 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1430 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1431 		}
1432 
1433 		hci_req_run(&req, suspend_req_complete);
1434 	}
1435 
1436 	hdev->suspend_state = next;
1437 
1438 done:
1439 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1440 	wake_up(&hdev->suspend_wait_q);
1441 }
1442 
1443 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1444 {
1445 	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1446 }
1447 
1448 void __hci_req_disable_advertising(struct hci_request *req)
1449 {
1450 	if (ext_adv_capable(req->hdev)) {
1451 		__hci_req_disable_ext_adv_instance(req, 0x00);
1452 
1453 	} else {
1454 		u8 enable = 0x00;
1455 
1456 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1457 	}
1458 }
1459 
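/* Return the MGMT advertising flags for the given instance. Instance 0x00
 * derives its flags from the relevant hdev settings; other instances use
 * the flags stored with the instance.
 */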
1460 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1461 {
1462 	u32 flags;
1463 	struct adv_info *adv_instance;
1464 
1465 	if (instance == 0x00) {
1466 		/* Instance 0 always manages the "Tx Power" and "Flags"
1467 		 * fields
1468 		 */
1469 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1470 
1471 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1472 		 * corresponds to the "connectable" instance flag.
1473 		 */
1474 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1475 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1476 
1477 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1478 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1479 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1480 			flags |= MGMT_ADV_FLAG_DISCOV;
1481 
1482 		return flags;
1483 	}
1484 
1485 	adv_instance = hci_find_adv_instance(hdev, instance);
1486 
1487 	/* Return 0 when we got an invalid instance identifier. */
1488 	if (!adv_instance)
1489 		return 0;
1490 
1491 	return adv_instance->flags;
1492 }
1493 
1494 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1495 {
1496 	/* If privacy is not enabled don't use RPA */
1497 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1498 		return false;
1499 
1500 	/* If basic privacy mode is enabled use RPA */
1501 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1502 		return true;
1503 
1504 	/* If limited privacy mode is enabled don't use RPA if we're
1505 	 * both discoverable and bondable.
1506 	 */
1507 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1509 		return false;
1510 
1511 	/* We're neither bondable nor discoverable in the limited
1512 	 * privacy mode, therefore use RPA.
1513 	 */
1514 	return true;
1515 }
1516 
1517 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1518 {
1519 	/* If there is no connection we are OK to advertise. */
1520 	if (hci_conn_num(hdev, LE_LINK) == 0)
1521 		return true;
1522 
1523 	/* Check le_states if there is any connection in peripheral role. */
1524 	if (hdev->conn_hash.le_num_peripheral > 0) {
1525 		/* Peripheral connection state and non connectable mode bit 20.
1526 		 */
1527 		if (!connectable && !(hdev->le_states[2] & 0x10))
1528 			return false;
1529 
1530 		/* Peripheral connection state and connectable mode bit 38
1531 		 * and scannable bit 21.
1532 		 */
1533 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534 				    !(hdev->le_states[2] & 0x20)))
1535 			return false;
1536 	}
1537 
1538 	/* Check le_states if there is any connection in central role. */
1539 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540 		/* Central connection state and non connectable mode bit 18. */
1541 		if (!connectable && !(hdev->le_states[2] & 0x02))
1542 			return false;
1543 
1544 		/* Central connection state and connectable mode bit 35 and
1545 		 * scannable 19.
1546 		 */
1547 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548 				    !(hdev->le_states[2] & 0x08)))
1549 			return false;
1550 	}
1551 
1552 	return true;
1553 }
1554 
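/* Queue the legacy LE Set Advertising Parameters/Enable commands for the
 * current advertising instance, picking the advertising type, intervals and
 * own address type based on the instance flags and hdev settings.
 */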
1555 void __hci_req_enable_advertising(struct hci_request *req)
1556 {
1557 	struct hci_dev *hdev = req->hdev;
1558 	struct adv_info *adv_instance;
1559 	struct hci_cp_le_set_adv_param cp;
1560 	u8 own_addr_type, enable = 0x01;
1561 	bool connectable;
1562 	u16 adv_min_interval, adv_max_interval;
1563 	u32 flags;
1564 
1565 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1567 
1568 	/* If the "connectable" instance flag was not set, then choose between
1569 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1570 	 */
1571 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572 		      mgmt_get_connectable(hdev);
1573 
1574 	if (!is_advertising_allowed(hdev, connectable))
1575 		return;
1576 
1577 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578 		__hci_req_disable_advertising(req);
1579 
1580 	/* Clear the HCI_LE_ADV bit temporarily so that the
1581 	 * hci_update_random_address knows that it's safe to go ahead
1582 	 * and write a new random address. The flag will be set back on
1583 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1584 	 */
1585 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1586 
1587 	/* Set require_privacy to true only when non-connectable
1588 	 * advertising is used. In that case it is fine to use a
1589 	 * non-resolvable private address.
1590 	 */
1591 	if (hci_update_random_address(req, !connectable,
1592 				      adv_use_rpa(hdev, flags),
1593 				      &own_addr_type) < 0)
1594 		return;
1595 
1596 	memset(&cp, 0, sizeof(cp));
1597 
1598 	if (adv_instance) {
1599 		adv_min_interval = adv_instance->min_interval;
1600 		adv_max_interval = adv_instance->max_interval;
1601 	} else {
1602 		adv_min_interval = hdev->le_adv_min_interval;
1603 		adv_max_interval = hdev->le_adv_max_interval;
1604 	}
1605 
1606 	if (connectable) {
1607 		cp.type = LE_ADV_IND;
1608 	} else {
1609 		if (adv_cur_instance_is_scannable(hdev))
1610 			cp.type = LE_ADV_SCAN_IND;
1611 		else
1612 			cp.type = LE_ADV_NONCONN_IND;
1613 
1614 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1618 		}
1619 	}
1620 
1621 	cp.min_interval = cpu_to_le16(adv_min_interval);
1622 	cp.max_interval = cpu_to_le16(adv_max_interval);
1623 	cp.own_address_type = own_addr_type;
1624 	cp.channel_map = hdev->le_adv_channel_map;
1625 
1626 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1627 
1628 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1629 }
1630 
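/* Append the local name to an advertising data buffer, preferring the
 * complete name if it fits, then the configured short name, and finally a
 * truncated copy of the complete name.
 */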
1631 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1632 {
1633 	size_t short_len;
1634 	size_t complete_len;
1635 
1636 	/* no space left for name (+ NULL + type + len) */
1637 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1638 		return ad_len;
1639 
1640 	/* use complete name if present and fits */
1641 	complete_len = strlen(hdev->dev_name);
1642 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1643 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1644 				       hdev->dev_name, complete_len + 1);
1645 
1646 	/* use short name if present */
1647 	short_len = strlen(hdev->short_name);
1648 	if (short_len)
1649 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1650 				       hdev->short_name, short_len + 1);
1651 
1652 	/* use shortened full name if present, we already know that name
1653 	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1654 	 */
1655 	if (complete_len) {
1656 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1657 
1658 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1659 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1660 
1661 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1662 				       sizeof(name));
1663 	}
1664 
1665 	return ad_len;
1666 }
1667 
1668 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1669 {
1670 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1671 }
1672 
1673 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1674 {
1675 	u8 scan_rsp_len = 0;
1676 
1677 	if (hdev->appearance)
1678 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1679 
1680 	return append_local_name(hdev, ptr, scan_rsp_len);
1681 }
1682 
1683 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1684 					u8 *ptr)
1685 {
1686 	struct adv_info *adv_instance;
1687 	u32 instance_flags;
1688 	u8 scan_rsp_len = 0;
1689 
1690 	adv_instance = hci_find_adv_instance(hdev, instance);
1691 	if (!adv_instance)
1692 		return 0;
1693 
1694 	instance_flags = adv_instance->flags;
1695 
1696 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1697 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1698 
1699 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1700 	       adv_instance->scan_rsp_len);
1701 
1702 	scan_rsp_len += adv_instance->scan_rsp_len;
1703 
1704 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1705 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1706 
1707 	return scan_rsp_len;
1708 }
1709 
1710 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1711 {
1712 	struct hci_dev *hdev = req->hdev;
1713 	u8 len;
1714 
1715 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1716 		return;
1717 
1718 	if (ext_adv_capable(hdev)) {
1719 		struct {
1720 			struct hci_cp_le_set_ext_scan_rsp_data cp;
1721 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1722 		} pdu;
1723 
1724 		memset(&pdu, 0, sizeof(pdu));
1725 
1726 		if (instance)
1727 			len = create_instance_scan_rsp_data(hdev, instance,
1728 							    pdu.data);
1729 		else
1730 			len = create_default_scan_rsp_data(hdev, pdu.data);
1731 
1732 		if (hdev->scan_rsp_data_len == len &&
1733 		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1734 			return;
1735 
1736 		memcpy(hdev->scan_rsp_data, pdu.data, len);
1737 		hdev->scan_rsp_data_len = len;
1738 
1739 		pdu.cp.handle = instance;
1740 		pdu.cp.length = len;
1741 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1742 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1743 
1744 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1745 			    sizeof(pdu.cp) + len, &pdu.cp);
1746 	} else {
1747 		struct hci_cp_le_set_scan_rsp_data cp;
1748 
1749 		memset(&cp, 0, sizeof(cp));
1750 
1751 		if (instance)
1752 			len = create_instance_scan_rsp_data(hdev, instance,
1753 							    cp.data);
1754 		else
1755 			len = create_default_scan_rsp_data(hdev, cp.data);
1756 
1757 		if (hdev->scan_rsp_data_len == len &&
1758 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1759 			return;
1760 
1761 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1762 		hdev->scan_rsp_data_len = len;
1763 
1764 		cp.length = len;
1765 
1766 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1767 	}
1768 }
1769 
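/* Build the advertising data for the given instance: a flags field
 * (unless the instance data already contains one), the instance's
 * advertising data, and optionally a TX power field. Returns the
 * resulting length, or 0 if a non-zero instance cannot be found.
 */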
1770 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1771 {
1772 	struct adv_info *adv_instance = NULL;
1773 	u8 ad_len = 0, flags = 0;
1774 	u32 instance_flags;
1775 
1776 	/* Return 0 when the current instance identifier is invalid. */
1777 	if (instance) {
1778 		adv_instance = hci_find_adv_instance(hdev, instance);
1779 		if (!adv_instance)
1780 			return 0;
1781 	}
1782 
1783 	instance_flags = get_adv_instance_flags(hdev, instance);
1784 
1785 	/* If the instance already has the flags set, skip adding them
1786 	 * again.
1787 	 */
1788 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1789 					 adv_instance->adv_data_len, EIR_FLAGS,
1790 					 NULL))
1791 		goto skip_flags;
1792 
1793 	/* The Add Advertising command allows userspace to set both the general
1794 	 * and limited discoverable flags.
1795 	 */
1796 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1797 		flags |= LE_AD_GENERAL;
1798 
1799 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1800 		flags |= LE_AD_LIMITED;
1801 
1802 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1803 		flags |= LE_AD_NO_BREDR;
1804 
1805 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1806 		/* If a discovery flag wasn't provided, simply use the global
1807 		 * settings.
1808 		 */
1809 		if (!flags)
1810 			flags |= mgmt_get_adv_discov_flags(hdev);
1811 
1812 		/* If flags would still be empty, then there is no need to
1813 		 * include the "Flags" AD field.
1814 		 */
1815 		if (flags) {
1816 			ptr[0] = 0x02;
1817 			ptr[1] = EIR_FLAGS;
1818 			ptr[2] = flags;
1819 
1820 			ad_len += 3;
1821 			ptr += 3;
1822 		}
1823 	}
1824 
1825 skip_flags:
1826 	if (adv_instance) {
1827 		memcpy(ptr, adv_instance->adv_data,
1828 		       adv_instance->adv_data_len);
1829 		ad_len += adv_instance->adv_data_len;
1830 		ptr += adv_instance->adv_data_len;
1831 	}
1832 
1833 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1834 		s8 adv_tx_power;
1835 
1836 		if (ext_adv_capable(hdev)) {
1837 			if (adv_instance)
1838 				adv_tx_power = adv_instance->tx_power;
1839 			else
1840 				adv_tx_power = hdev->adv_tx_power;
1841 		} else {
1842 			adv_tx_power = hdev->adv_tx_power;
1843 		}
1844 
1845 		/* Provide Tx Power only if we can provide a valid value for it */
1846 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1847 			ptr[0] = 0x02;
1848 			ptr[1] = EIR_TX_POWER;
1849 			ptr[2] = (u8)adv_tx_power;
1850 
1851 			ad_len += 3;
1852 			ptr += 3;
1853 		}
1854 	}
1855 
1856 	return ad_len;
1857 }
1858 
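/* Queue an update of the advertising data for the given instance, using
 * the extended or legacy HCI command depending on controller support.
 * Nothing is queued when LE is disabled or the data is unchanged.
 */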
1859 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1860 {
1861 	struct hci_dev *hdev = req->hdev;
1862 	u8 len;
1863 
1864 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1865 		return;
1866 
1867 	if (ext_adv_capable(hdev)) {
1868 		struct {
1869 			struct hci_cp_le_set_ext_adv_data cp;
1870 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1871 		} pdu;
1872 
1873 		memset(&pdu, 0, sizeof(pdu));
1874 
1875 		len = create_instance_adv_data(hdev, instance, pdu.data);
1876 
1877 		/* There's nothing to do if the data hasn't changed */
1878 		if (hdev->adv_data_len == len &&
1879 		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1880 			return;
1881 
1882 		memcpy(hdev->adv_data, pdu.data, len);
1883 		hdev->adv_data_len = len;
1884 
1885 		pdu.cp.length = len;
1886 		pdu.cp.handle = instance;
1887 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1888 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1889 
1890 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1891 			    sizeof(pdu.cp) + len, &pdu.cp);
1892 	} else {
1893 		struct hci_cp_le_set_adv_data cp;
1894 
1895 		memset(&cp, 0, sizeof(cp));
1896 
1897 		len = create_instance_adv_data(hdev, instance, cp.data);
1898 
1899 		/* There's nothing to do if the data hasn't changed */
1900 		if (hdev->adv_data_len == len &&
1901 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1902 			return;
1903 
1904 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1905 		hdev->adv_data_len = len;
1906 
1907 		cp.length = len;
1908 
1909 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1910 	}
1911 }
1912 
1913 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1914 {
1915 	struct hci_request req;
1916 
1917 	hci_req_init(&req, hdev);
1918 	__hci_req_update_adv_data(&req, instance);
1919 
1920 	return hci_req_run(&req, NULL);
1921 }
1922 
1923 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1924 					    u16 opcode)
1925 {
1926 	BT_DBG("%s status %u", hdev->name, status);
1927 }
1928 
1929 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1930 {
1931 	struct hci_request req;
1932 	__u8 enable = 0x00;
1933 
1934 	if (!use_ll_privacy(hdev) &&
1935 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1936 		return;
1937 
1938 	hci_req_init(&req, hdev);
1939 
1940 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1941 
1942 	hci_req_run(&req, enable_addr_resolution_complete);
1943 }
1944 
1945 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1946 {
1947 	bt_dev_dbg(hdev, "status %u", status);
1948 }
1949 
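/* Re-enable advertising, either by rescheduling the current instance or,
 * when no instance is active, by restarting the default instance 0x00.
 */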
1950 void hci_req_reenable_advertising(struct hci_dev *hdev)
1951 {
1952 	struct hci_request req;
1953 
1954 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1955 	    list_empty(&hdev->adv_instances))
1956 		return;
1957 
1958 	hci_req_init(&req, hdev);
1959 
1960 	if (hdev->cur_adv_instance) {
1961 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1962 						true);
1963 	} else {
1964 		if (ext_adv_capable(hdev)) {
1965 			__hci_req_start_ext_adv(&req, 0x00);
1966 		} else {
1967 			__hci_req_update_adv_data(&req, 0x00);
1968 			__hci_req_update_scan_rsp_data(&req, 0x00);
1969 			__hci_req_enable_advertising(&req);
1970 		}
1971 	}
1972 
1973 	hci_req_run(&req, adv_enable_complete);
1974 }
1975 
1976 static void adv_timeout_expire(struct work_struct *work)
1977 {
1978 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1979 					    adv_instance_expire.work);
1980 
1981 	struct hci_request req;
1982 	u8 instance;
1983 
1984 	bt_dev_dbg(hdev, "");
1985 
1986 	hci_dev_lock(hdev);
1987 
1988 	hdev->adv_instance_timeout = 0;
1989 
1990 	instance = hdev->cur_adv_instance;
1991 	if (instance == 0x00)
1992 		goto unlock;
1993 
1994 	hci_req_init(&req, hdev);
1995 
1996 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1997 
1998 	if (list_empty(&hdev->adv_instances))
1999 		__hci_req_disable_advertising(&req);
2000 
2001 	hci_req_run(&req, NULL);
2002 
2003 unlock:
2004 	hci_dev_unlock(hdev);
2005 }
2006 
2007 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2008 					   unsigned long opt)
2009 {
2010 	struct hci_dev *hdev = req->hdev;
2011 	int ret = 0;
2012 
2013 	hci_dev_lock(hdev);
2014 
2015 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2016 		hci_req_add_le_scan_disable(req, false);
2017 	hci_req_add_le_passive_scan(req);
2018 
2019 	switch (hdev->interleave_scan_state) {
2020 	case INTERLEAVE_SCAN_ALLOWLIST:
2021 		bt_dev_dbg(hdev, "next state: allowlist");
2022 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2023 		break;
2024 	case INTERLEAVE_SCAN_NO_FILTER:
2025 		bt_dev_dbg(hdev, "next state: no filter");
2026 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2027 		break;
2028 	case INTERLEAVE_SCAN_NONE:
2029 		BT_ERR("unexpected error");
2030 		ret = -1;
2031 	}
2032 
2033 	hci_dev_unlock(hdev);
2034 
2035 	return ret;
2036 }
2037 
2038 static void interleave_scan_work(struct work_struct *work)
2039 {
2040 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2041 					    interleave_scan.work);
2042 	u8 status;
2043 	unsigned long timeout;
2044 
2045 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2046 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2047 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2048 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2049 	} else {
2050 		bt_dev_err(hdev, "unexpected error");
2051 		return;
2052 	}
2053 
2054 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2055 		     HCI_CMD_TIMEOUT, &status);
2056 
2057 	/* Don't continue interleaving if it was canceled */
2058 	if (is_interleave_scanning(hdev))
2059 		queue_delayed_work(hdev->req_workqueue,
2060 				   &hdev->interleave_scan, timeout);
2061 }
2062 
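/* Select the own address type and, when required, the random address to
 * use for advertising. With RPA-based privacy the stored RPA is reused or
 * regenerated, with require_privacy a fresh non-resolvable private
 * address is generated, and otherwise the public address is used.
 */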
2063 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2064 			   bool use_rpa, struct adv_info *adv_instance,
2065 			   u8 *own_addr_type, bdaddr_t *rand_addr)
2066 {
2067 	int err;
2068 
2069 	bacpy(rand_addr, BDADDR_ANY);
2070 
2071 	/* If privacy is enabled use a resolvable private address. If
2072 	 * the current RPA has expired, then generate a new one.
2073 	 */
2074 	if (use_rpa) {
2075 		/* If the controller supports LL Privacy, use own address
2076 		 * type 0x03
2077 		 */
2078 		if (use_ll_privacy(hdev) &&
2079 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2080 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2081 		else
2082 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2083 
2084 		if (adv_instance) {
2085 			if (adv_rpa_valid(adv_instance))
2086 				return 0;
2087 		} else {
2088 			if (rpa_valid(hdev))
2089 				return 0;
2090 		}
2091 
2092 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2093 		if (err < 0) {
2094 			bt_dev_err(hdev, "failed to generate new RPA");
2095 			return err;
2096 		}
2097 
2098 		bacpy(rand_addr, &hdev->rpa);
2099 
2100 		return 0;
2101 	}
2102 
2103 	/* In case of required privacy without resolvable private address,
2104 	 * use a non-resolvable private address. This is useful for
2105 	 * non-connectable advertising.
2106 	 */
2107 	if (require_privacy) {
2108 		bdaddr_t nrpa;
2109 
2110 		while (true) {
2111 			/* The non-resolvable private address is generated
2112 			 * from six random bytes with the two most significant
2113 			 * bits cleared.
2114 			 */
2115 			get_random_bytes(&nrpa, 6);
2116 			nrpa.b[5] &= 0x3f;
2117 
2118 			/* The non-resolvable private address shall not be
2119 			 * equal to the public address.
2120 			 */
2121 			if (bacmp(&hdev->bdaddr, &nrpa))
2122 				break;
2123 		}
2124 
2125 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2126 		bacpy(rand_addr, &nrpa);
2127 
2128 		return 0;
2129 	}
2130 
2131 	/* No privacy so use a public address. */
2132 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2133 
2134 	return 0;
2135 }
2136 
2137 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2138 {
2139 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2140 }
2141 
2142 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2143 {
2144 	struct hci_dev *hdev = req->hdev;
2145 
2146 	/* If we're advertising or initiating an LE connection we can't
2147 	 * go ahead and change the random address at this time. This is
2148 	 * because the eventual initiator address used for the
2149 	 * subsequently created connection will be undefined (some
2150 	 * controllers use the new address and others the one we had
2151 	 * when the operation started).
2152 	 *
2153 	 * In this kind of scenario skip the update and let the random
2154 	 * address be updated at the next cycle.
2155 	 */
2156 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2157 	    hci_lookup_le_connect(hdev)) {
2158 		bt_dev_dbg(hdev, "Deferring random address update");
2159 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2160 		return;
2161 	}
2162 
2163 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2164 }
2165 
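/* Queue the extended advertising parameters for the given instance:
 * intervals, event properties, PHYs, channel map and own address type.
 * When a random address is required and differs from the one currently
 * programmed, a Set Advertising Set Random Address command is queued as
 * well.
 */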
2166 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2167 {
2168 	struct hci_cp_le_set_ext_adv_params cp;
2169 	struct hci_dev *hdev = req->hdev;
2170 	bool connectable;
2171 	u32 flags;
2172 	bdaddr_t random_addr;
2173 	u8 own_addr_type;
2174 	int err;
2175 	struct adv_info *adv_instance;
2176 	bool secondary_adv;
2177 
2178 	if (instance > 0) {
2179 		adv_instance = hci_find_adv_instance(hdev, instance);
2180 		if (!adv_instance)
2181 			return -EINVAL;
2182 	} else {
2183 		adv_instance = NULL;
2184 	}
2185 
2186 	flags = get_adv_instance_flags(hdev, instance);
2187 
2188 	/* If the "connectable" instance flag was not set, then choose between
2189 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2190 	 */
2191 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2192 		      mgmt_get_connectable(hdev);
2193 
2194 	if (!is_advertising_allowed(hdev, connectable))
2195 		return -EPERM;
2196 
2197 	/* Set require_privacy to true only when non-connectable
2198 	 * advertising is used. In that case it is fine to use a
2199 	 * non-resolvable private address.
2200 	 */
2201 	err = hci_get_random_address(hdev, !connectable,
2202 				     adv_use_rpa(hdev, flags), adv_instance,
2203 				     &own_addr_type, &random_addr);
2204 	if (err < 0)
2205 		return err;
2206 
2207 	memset(&cp, 0, sizeof(cp));
2208 
2209 	if (adv_instance) {
2210 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2211 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2212 		cp.tx_power = adv_instance->tx_power;
2213 	} else {
2214 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2215 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2216 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2217 	}
2218 
2219 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2220 
2221 	if (connectable) {
2222 		if (secondary_adv)
2223 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2224 		else
2225 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2226 	} else if (adv_instance_is_scannable(hdev, instance) ||
2227 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2228 		if (secondary_adv)
2229 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2230 		else
2231 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2232 	} else {
2233 		if (secondary_adv)
2234 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2235 		else
2236 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2237 	}
2238 
2239 	cp.own_addr_type = own_addr_type;
2240 	cp.channel_map = hdev->le_adv_channel_map;
2241 	cp.handle = instance;
2242 
2243 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2244 		cp.primary_phy = HCI_ADV_PHY_1M;
2245 		cp.secondary_phy = HCI_ADV_PHY_2M;
2246 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2247 		cp.primary_phy = HCI_ADV_PHY_CODED;
2248 		cp.secondary_phy = HCI_ADV_PHY_CODED;
2249 	} else {
2250 		/* In all other cases use 1M */
2251 		cp.primary_phy = HCI_ADV_PHY_1M;
2252 		cp.secondary_phy = HCI_ADV_PHY_1M;
2253 	}
2254 
2255 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2256 
2257 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2258 	    bacmp(&random_addr, BDADDR_ANY)) {
2259 		struct hci_cp_le_set_adv_set_rand_addr cp;
2260 
2261 		/* Check if the random address needs to be updated */
2262 		if (adv_instance) {
2263 			if (!bacmp(&random_addr, &adv_instance->random_addr))
2264 				return 0;
2265 		} else {
2266 			if (!bacmp(&random_addr, &hdev->random_addr))
2267 				return 0;
2268 			/* Instance 0x00 doesn't have an adv_info; instead it
2269 			 * uses hdev->random_addr to track its address, so
2270 			 * whenever it needs to be updated this also sets the
2271 			 * random address, since hdev->random_addr is shared
2272 			 * with the scan state machine.
2273 			 */
2274 			set_random_addr(req, &random_addr);
2275 		}
2276 
2277 		memset(&cp, 0, sizeof(cp));
2278 
2279 		cp.handle = instance;
2280 		bacpy(&cp.bdaddr, &random_addr);
2281 
2282 		hci_req_add(req,
2283 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2284 			    sizeof(cp), &cp);
2285 	}
2286 
2287 	return 0;
2288 }
2289 
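/* Queue enabling of the extended advertising set for the given instance.
 * If the instance has a timeout, the corresponding duration is passed to
 * the controller so that it stops the set on its own.
 */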
2290 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2291 {
2292 	struct hci_dev *hdev = req->hdev;
2293 	struct hci_cp_le_set_ext_adv_enable *cp;
2294 	struct hci_cp_ext_adv_set *adv_set;
2295 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2296 	struct adv_info *adv_instance;
2297 
2298 	if (instance > 0) {
2299 		adv_instance = hci_find_adv_instance(hdev, instance);
2300 		if (!adv_instance)
2301 			return -EINVAL;
2302 	} else {
2303 		adv_instance = NULL;
2304 	}
2305 
2306 	cp = (void *) data;
2307 	adv_set = (void *) cp->data;
2308 
2309 	memset(cp, 0, sizeof(*cp));
2310 
2311 	cp->enable = 0x01;
2312 	cp->num_of_sets = 0x01;
2313 
2314 	memset(adv_set, 0, sizeof(*adv_set));
2315 
2316 	adv_set->handle = instance;
2317 
2318 	/* Set duration per instance since controller is responsible for
2319 	 * scheduling it.
2320 	 */
2321 	if (adv_instance && adv_instance->timeout) {
2322 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2323 
2324 		/* Time = N * 10 ms */
2325 		adv_set->duration = cpu_to_le16(duration / 10);
2326 	}
2327 
2328 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2329 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2330 		    data);
2331 
2332 	return 0;
2333 }
2334 
2335 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2336 {
2337 	struct hci_dev *hdev = req->hdev;
2338 	struct hci_cp_le_set_ext_adv_enable *cp;
2339 	struct hci_cp_ext_adv_set *adv_set;
2340 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2341 	u8 req_size;
2342 
2343 	/* If request specifies an instance that doesn't exist, fail */
2344 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2345 		return -EINVAL;
2346 
2347 	memset(data, 0, sizeof(data));
2348 
2349 	cp = (void *)data;
2350 	adv_set = (void *)cp->data;
2351 
2352 	/* Instance 0x00 indicates all advertising instances will be disabled */
2353 	cp->num_of_sets = !!instance;
2354 	cp->enable = 0x00;
2355 
2356 	adv_set->handle = instance;
2357 
2358 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2359 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2360 
2361 	return 0;
2362 }
2363 
2364 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2365 {
2366 	struct hci_dev *hdev = req->hdev;
2367 
2368 	/* If request specifies an instance that doesn't exist, fail */
2369 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2370 		return -EINVAL;
2371 
2372 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2373 
2374 	return 0;
2375 }
2376 
2377 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2378 {
2379 	struct hci_dev *hdev = req->hdev;
2380 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2381 	int err;
2382 
2383 	/* If instance isn't pending, the chip knows about it, and it's safe to
2384 	 * disable.
2385 	 */
2386 	if (adv_instance && !adv_instance->pending)
2387 		__hci_req_disable_ext_adv_instance(req, instance);
2388 
2389 	err = __hci_req_setup_ext_adv_instance(req, instance);
2390 	if (err < 0)
2391 		return err;
2392 
2393 	__hci_req_update_scan_rsp_data(req, instance);
2394 	__hci_req_enable_ext_advertising(req, instance);
2395 
2396 	return 0;
2397 }
2398 
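/* Make the given instance the current advertising instance and start
 * advertising it. The timeout is derived from the instance duration,
 * capped by its remaining lifetime; for legacy advertising a delayed work
 * is queued to expire the instance, since the controller cannot schedule
 * that on its own.
 */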
2399 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2400 				    bool force)
2401 {
2402 	struct hci_dev *hdev = req->hdev;
2403 	struct adv_info *adv_instance = NULL;
2404 	u16 timeout;
2405 
2406 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2407 	    list_empty(&hdev->adv_instances))
2408 		return -EPERM;
2409 
2410 	if (hdev->adv_instance_timeout)
2411 		return -EBUSY;
2412 
2413 	adv_instance = hci_find_adv_instance(hdev, instance);
2414 	if (!adv_instance)
2415 		return -ENOENT;
2416 
2417 	/* A zero timeout means unlimited advertising. As long as there is
2418 	 * only one instance, duration should be ignored. We still set a timeout
2419 	 * in case further instances are being added later on.
2420 	 *
2421 	 * If the remaining lifetime of the instance is more than the duration
2422 	 * then the timeout corresponds to the duration, otherwise it will be
2423 	 * reduced to the remaining instance lifetime.
2424 	 */
2425 	if (adv_instance->timeout == 0 ||
2426 	    adv_instance->duration <= adv_instance->remaining_time)
2427 		timeout = adv_instance->duration;
2428 	else
2429 		timeout = adv_instance->remaining_time;
2430 
2431 	/* The remaining time is being reduced unless the instance is being
2432 	 * advertised without time limit.
2433 	 */
2434 	if (adv_instance->timeout)
2435 		adv_instance->remaining_time =
2436 				adv_instance->remaining_time - timeout;
2437 
2438 	/* Only use work for scheduling instances with legacy advertising */
2439 	if (!ext_adv_capable(hdev)) {
2440 		hdev->adv_instance_timeout = timeout;
2441 		queue_delayed_work(hdev->req_workqueue,
2442 			   &hdev->adv_instance_expire,
2443 			   msecs_to_jiffies(timeout * 1000));
2444 	}
2445 
2446 	/* If we're just re-scheduling the same instance again then do not
2447 	 * execute any HCI commands. This happens when a single instance is
2448 	 * being advertised.
2449 	 */
2450 	if (!force && hdev->cur_adv_instance == instance &&
2451 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2452 		return 0;
2453 
2454 	hdev->cur_adv_instance = instance;
2455 	if (ext_adv_capable(hdev)) {
2456 		__hci_req_start_ext_adv(req, instance);
2457 	} else {
2458 		__hci_req_update_adv_data(req, instance);
2459 		__hci_req_update_scan_rsp_data(req, instance);
2460 		__hci_req_enable_advertising(req);
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 /* For a single instance:
2467  * - force == true: The instance will be removed even when its remaining
2468  *   lifetime is not zero.
2469  * - force == false: the instance will be deactivated but kept stored unless
2470  *   the remaining lifetime is zero.
2471  *
2472  * For instance == 0x00:
2473  * - force == true: All instances will be removed regardless of their timeout
2474  *   setting.
2475  * - force == false: Only instances that have a timeout will be removed.
2476  */
2477 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2478 				struct hci_request *req, u8 instance,
2479 				bool force)
2480 {
2481 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2482 	int err;
2483 	u8 rem_inst;
2484 
2485 	/* Cancel any timeout concerning the removed instance(s). */
2486 	if (!instance || hdev->cur_adv_instance == instance)
2487 		cancel_adv_timeout(hdev);
2488 
2489 	/* Get the next instance to advertise BEFORE we remove
2490 	 * the current one. This can be the same instance again
2491 	 * if there is only one instance.
2492 	 */
2493 	if (instance && hdev->cur_adv_instance == instance)
2494 		next_instance = hci_get_next_instance(hdev, instance);
2495 
2496 	if (instance == 0x00) {
2497 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2498 					 list) {
2499 			if (!(force || adv_instance->timeout))
2500 				continue;
2501 
2502 			rem_inst = adv_instance->instance;
2503 			err = hci_remove_adv_instance(hdev, rem_inst);
2504 			if (!err)
2505 				mgmt_advertising_removed(sk, hdev, rem_inst);
2506 		}
2507 	} else {
2508 		adv_instance = hci_find_adv_instance(hdev, instance);
2509 
2510 		if (force || (adv_instance && adv_instance->timeout &&
2511 			      !adv_instance->remaining_time)) {
2512 			/* Don't advertise a removed instance. */
2513 			if (next_instance &&
2514 			    next_instance->instance == instance)
2515 				next_instance = NULL;
2516 
2517 			err = hci_remove_adv_instance(hdev, instance);
2518 			if (!err)
2519 				mgmt_advertising_removed(sk, hdev, instance);
2520 		}
2521 	}
2522 
2523 	if (!req || !hdev_is_powered(hdev) ||
2524 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2525 		return;
2526 
2527 	if (next_instance && !ext_adv_capable(hdev))
2528 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2529 						false);
2530 }
2531 
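/* Choose the own address type for scanning or connecting and queue an
 * update of the controller's random address when privacy (RPA or NRPA)
 * or the static address is in use. Falls back to the public address when
 * none of these apply.
 */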
2532 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2533 			      bool use_rpa, u8 *own_addr_type)
2534 {
2535 	struct hci_dev *hdev = req->hdev;
2536 	int err;
2537 
2538 	/* If privacy is enabled use a resolvable private address. If the
2539 	 * current RPA has expired or there is something other than
2540 	 * the current RPA in use, then generate a new one.
2541 	 */
2542 	if (use_rpa) {
2543 		/* If the controller supports LL Privacy, use own address
2544 		 * type 0x03
2545 		 */
2546 		if (use_ll_privacy(hdev) &&
2547 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2548 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2549 		else
2550 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2551 
2552 		if (rpa_valid(hdev))
2553 			return 0;
2554 
2555 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2556 		if (err < 0) {
2557 			bt_dev_err(hdev, "failed to generate new RPA");
2558 			return err;
2559 		}
2560 
2561 		set_random_addr(req, &hdev->rpa);
2562 
2563 		return 0;
2564 	}
2565 
2566 	/* In case of required privacy without resolvable private address,
2567 	 * use a non-resolvable private address. This is useful for active
2568 	 * scanning and non-connectable advertising.
2569 	 */
2570 	if (require_privacy) {
2571 		bdaddr_t nrpa;
2572 
2573 		while (true) {
2574 			/* The non-resolvable private address is generated
2575 			 * from six random bytes with the two most significant
2576 			 * bits cleared.
2577 			 */
2578 			get_random_bytes(&nrpa, 6);
2579 			nrpa.b[5] &= 0x3f;
2580 
2581 			/* The non-resolvable private address shall not be
2582 			 * equal to the public address.
2583 			 */
2584 			if (bacmp(&hdev->bdaddr, &nrpa))
2585 				break;
2586 		}
2587 
2588 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2589 		set_random_addr(req, &nrpa);
2590 		return 0;
2591 	}
2592 
2593 	/* If forcing static address is in use or there is no public
2594 	 * address, use the static address as the random address (but skip
2595 	 * the HCI command if the current random address is already the
2596 	 * static one).
2597 	 *
2598 	 * In case BR/EDR has been disabled on a dual-mode controller
2599 	 * and a static address has been configured, then use that
2600 	 * address instead of the public BR/EDR address.
2601 	 */
2602 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2603 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2604 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2605 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2606 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2607 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2608 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2609 				    &hdev->static_addr);
2610 		return 0;
2611 	}
2612 
2613 	/* Neither privacy nor static address is being used so use a
2614 	 * public address.
2615 	 */
2616 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2617 
2618 	return 0;
2619 }
2620 
2621 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2622 {
2623 	struct bdaddr_list *b;
2624 
2625 	list_for_each_entry(b, &hdev->accept_list, list) {
2626 		struct hci_conn *conn;
2627 
2628 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2629 		if (!conn)
2630 			return true;
2631 
2632 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2633 			return true;
2634 	}
2635 
2636 	return false;
2637 }
2638 
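/* Update the BR/EDR page and inquiry scan setting based on the
 * connectable and discoverable flags and on accept list entries that are
 * not currently connected. No command is queued when the requested
 * setting is already in effect.
 */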
2639 void __hci_req_update_scan(struct hci_request *req)
2640 {
2641 	struct hci_dev *hdev = req->hdev;
2642 	u8 scan;
2643 
2644 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2645 		return;
2646 
2647 	if (!hdev_is_powered(hdev))
2648 		return;
2649 
2650 	if (mgmt_powering_down(hdev))
2651 		return;
2652 
2653 	if (hdev->scanning_paused)
2654 		return;
2655 
2656 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2657 	    disconnected_accept_list_entries(hdev))
2658 		scan = SCAN_PAGE;
2659 	else
2660 		scan = SCAN_DISABLED;
2661 
2662 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2663 		scan |= SCAN_INQUIRY;
2664 
2665 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2666 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2667 		return;
2668 
2669 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2670 }
2671 
2672 static int update_scan(struct hci_request *req, unsigned long opt)
2673 {
2674 	hci_dev_lock(req->hdev);
2675 	__hci_req_update_scan(req);
2676 	hci_dev_unlock(req->hdev);
2677 	return 0;
2678 }
2679 
2680 static void scan_update_work(struct work_struct *work)
2681 {
2682 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2683 
2684 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2685 }
2686 
2687 static int connectable_update(struct hci_request *req, unsigned long opt)
2688 {
2689 	struct hci_dev *hdev = req->hdev;
2690 
2691 	hci_dev_lock(hdev);
2692 
2693 	__hci_req_update_scan(req);
2694 
2695 	/* If BR/EDR is not enabled and we disable advertising as a
2696 	 * by-product of disabling connectable, we need to update the
2697 	 * advertising flags.
2698 	 */
2699 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2700 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2701 
2702 	/* Update the advertising parameters if necessary */
2703 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2704 	    !list_empty(&hdev->adv_instances)) {
2705 		if (ext_adv_capable(hdev))
2706 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2707 		else
2708 			__hci_req_enable_advertising(req);
2709 	}
2710 
2711 	__hci_update_background_scan(req);
2712 
2713 	hci_dev_unlock(hdev);
2714 
2715 	return 0;
2716 }
2717 
2718 static void connectable_update_work(struct work_struct *work)
2719 {
2720 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2721 					    connectable_update);
2722 	u8 status;
2723 
2724 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2725 	mgmt_set_connectable_complete(hdev, status);
2726 }
2727 
2728 static u8 get_service_classes(struct hci_dev *hdev)
2729 {
2730 	struct bt_uuid *uuid;
2731 	u8 val = 0;
2732 
2733 	list_for_each_entry(uuid, &hdev->uuids, list)
2734 		val |= uuid->svc_hint;
2735 
2736 	return val;
2737 }
2738 
2739 void __hci_req_update_class(struct hci_request *req)
2740 {
2741 	struct hci_dev *hdev = req->hdev;
2742 	u8 cod[3];
2743 
2744 	bt_dev_dbg(hdev, "");
2745 
2746 	if (!hdev_is_powered(hdev))
2747 		return;
2748 
2749 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2750 		return;
2751 
2752 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2753 		return;
2754 
2755 	cod[0] = hdev->minor_class;
2756 	cod[1] = hdev->major_class;
2757 	cod[2] = get_service_classes(hdev);
2758 
2759 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2760 		cod[1] |= 0x20;
2761 
2762 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2763 		return;
2764 
2765 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2766 }
2767 
2768 static void write_iac(struct hci_request *req)
2769 {
2770 	struct hci_dev *hdev = req->hdev;
2771 	struct hci_cp_write_current_iac_lap cp;
2772 
2773 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2774 		return;
2775 
2776 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2777 		/* Limited discoverable mode */
2778 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2779 		cp.iac_lap[0] = 0x00;	/* LIAC */
2780 		cp.iac_lap[1] = 0x8b;
2781 		cp.iac_lap[2] = 0x9e;
2782 		cp.iac_lap[3] = 0x33;	/* GIAC */
2783 		cp.iac_lap[4] = 0x8b;
2784 		cp.iac_lap[5] = 0x9e;
2785 	} else {
2786 		/* General discoverable mode */
2787 		cp.num_iac = 1;
2788 		cp.iac_lap[0] = 0x33;	/* GIAC */
2789 		cp.iac_lap[1] = 0x8b;
2790 		cp.iac_lap[2] = 0x9e;
2791 	}
2792 
2793 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2794 		    (cp.num_iac * 3) + 1, &cp);
2795 }
2796 
2797 static int discoverable_update(struct hci_request *req, unsigned long opt)
2798 {
2799 	struct hci_dev *hdev = req->hdev;
2800 
2801 	hci_dev_lock(hdev);
2802 
2803 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2804 		write_iac(req);
2805 		__hci_req_update_scan(req);
2806 		__hci_req_update_class(req);
2807 	}
2808 
2809 	/* Advertising instances don't use the global discoverable setting, so
2810 	 * only update AD if advertising was enabled using Set Advertising.
2811 	 */
2812 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2813 		__hci_req_update_adv_data(req, 0x00);
2814 
2815 		/* Discoverable mode affects the local advertising
2816 		 * address in limited privacy mode.
2817 		 */
2818 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2819 			if (ext_adv_capable(hdev))
2820 				__hci_req_start_ext_adv(req, 0x00);
2821 			else
2822 				__hci_req_enable_advertising(req);
2823 		}
2824 	}
2825 
2826 	hci_dev_unlock(hdev);
2827 
2828 	return 0;
2829 }
2830 
2831 static void discoverable_update_work(struct work_struct *work)
2832 {
2833 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2834 					    discoverable_update);
2835 	u8 status;
2836 
2837 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2838 	mgmt_set_discoverable_complete(hdev, status);
2839 }
2840 
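/* Queue the HCI command appropriate for aborting the connection given its
 * current state and link type: disconnect, cancel the pending connection
 * creation, or reject the incoming connection request.
 */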
2841 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2842 		      u8 reason)
2843 {
2844 	switch (conn->state) {
2845 	case BT_CONNECTED:
2846 	case BT_CONFIG:
2847 		if (conn->type == AMP_LINK) {
2848 			struct hci_cp_disconn_phy_link cp;
2849 
2850 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2851 			cp.reason = reason;
2852 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2853 				    &cp);
2854 		} else {
2855 			struct hci_cp_disconnect dc;
2856 
2857 			dc.handle = cpu_to_le16(conn->handle);
2858 			dc.reason = reason;
2859 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2860 		}
2861 
2862 		conn->state = BT_DISCONN;
2863 
2864 		break;
2865 	case BT_CONNECT:
2866 		if (conn->type == LE_LINK) {
2867 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2868 				break;
2869 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2870 				    0, NULL);
2871 		} else if (conn->type == ACL_LINK) {
2872 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2873 				break;
2874 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2875 				    6, &conn->dst);
2876 		}
2877 		break;
2878 	case BT_CONNECT2:
2879 		if (conn->type == ACL_LINK) {
2880 			struct hci_cp_reject_conn_req rej;
2881 
2882 			bacpy(&rej.bdaddr, &conn->dst);
2883 			rej.reason = reason;
2884 
2885 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2886 				    sizeof(rej), &rej);
2887 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2888 			struct hci_cp_reject_sync_conn_req rej;
2889 
2890 			bacpy(&rej.bdaddr, &conn->dst);
2891 
2892 			/* SCO rejection has its own limited set of
2893 			 * allowed error values (0x0D-0x0F) which isn't
2894 			 * compatible with most values passed to this
2895 			 * function. To be safe hard-code one of the
2896 			 * values that's suitable for SCO.
2897 			 */
2898 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2899 
2900 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2901 				    sizeof(rej), &rej);
2902 		}
2903 		break;
2904 	default:
2905 		conn->state = BT_CLOSED;
2906 		break;
2907 	}
2908 }
2909 
2910 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2911 {
2912 	if (status)
2913 		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2914 }
2915 
2916 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2917 {
2918 	struct hci_request req;
2919 	int err;
2920 
2921 	hci_req_init(&req, conn->hdev);
2922 
2923 	__hci_abort_conn(&req, conn, reason);
2924 
2925 	err = hci_req_run(&req, abort_conn_complete);
2926 	if (err && err != -ENODATA) {
2927 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2928 		return err;
2929 	}
2930 
2931 	return 0;
2932 }
2933 
2934 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2935 {
2936 	hci_dev_lock(req->hdev);
2937 	__hci_update_background_scan(req);
2938 	hci_dev_unlock(req->hdev);
2939 	return 0;
2940 }
2941 
2942 static void bg_scan_update(struct work_struct *work)
2943 {
2944 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2945 					    bg_scan_update);
2946 	struct hci_conn *conn;
2947 	u8 status;
2948 	int err;
2949 
2950 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2951 	if (!err)
2952 		return;
2953 
2954 	hci_dev_lock(hdev);
2955 
2956 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2957 	if (conn)
2958 		hci_le_conn_failed(conn, status);
2959 
2960 	hci_dev_unlock(hdev);
2961 }
2962 
2963 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2964 {
2965 	hci_req_add_le_scan_disable(req, false);
2966 	return 0;
2967 }
2968 
2969 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2970 {
2971 	u8 length = opt;
2972 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2973 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2974 	struct hci_cp_inquiry cp;
2975 
2976 	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2977 		return 0;
2978 
2979 	bt_dev_dbg(req->hdev, "");
2980 
2981 	hci_dev_lock(req->hdev);
2982 	hci_inquiry_cache_flush(req->hdev);
2983 	hci_dev_unlock(req->hdev);
2984 
2985 	memset(&cp, 0, sizeof(cp));
2986 
2987 	if (req->hdev->discovery.limited)
2988 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2989 	else
2990 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2991 
2992 	cp.length = length;
2993 
2994 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2995 
2996 	return 0;
2997 }
2998 
2999 static void le_scan_disable_work(struct work_struct *work)
3000 {
3001 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3002 					    le_scan_disable.work);
3003 	u8 status;
3004 
3005 	bt_dev_dbg(hdev, "");
3006 
3007 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3008 		return;
3009 
3010 	cancel_delayed_work(&hdev->le_scan_restart);
3011 
3012 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3013 	if (status) {
3014 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3015 			   status);
3016 		return;
3017 	}
3018 
3019 	hdev->discovery.scan_start = 0;
3020 
3021 	/* If we were running an LE-only scan, change the discovery state.
3022 	 * If we were running both LE and BR/EDR inquiry simultaneously,
3023 	 * and BR/EDR inquiry is already finished, stop discovery;
3024 	 * otherwise BR/EDR inquiry will stop discovery when finished.
3025 	 * If we are going to resolve a remote device name, do not change
3026 	 * the discovery state.
3027 	 */
3028 
3029 	if (hdev->discovery.type == DISCOV_TYPE_LE)
3030 		goto discov_stopped;
3031 
3032 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3033 		return;
3034 
3035 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3036 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3037 		    hdev->discovery.state != DISCOVERY_RESOLVING)
3038 			goto discov_stopped;
3039 
3040 		return;
3041 	}
3042 
3043 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3044 		     HCI_CMD_TIMEOUT, &status);
3045 	if (status) {
3046 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3047 		goto discov_stopped;
3048 	}
3049 
3050 	return;
3051 
3052 discov_stopped:
3053 	hci_dev_lock(hdev);
3054 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3055 	hci_dev_unlock(hdev);
3056 }
3057 
3058 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3059 {
3060 	struct hci_dev *hdev = req->hdev;
3061 
3062 	/* If controller is not scanning we are done. */
3063 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3064 		return 0;
3065 
3066 	if (hdev->scanning_paused) {
3067 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
3068 		return 0;
3069 	}
3070 
3071 	hci_req_add_le_scan_disable(req, false);
3072 
3073 	if (use_ext_scan(hdev)) {
3074 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3075 
3076 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3077 		ext_enable_cp.enable = LE_SCAN_ENABLE;
3078 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3079 
3080 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3081 			    sizeof(ext_enable_cp), &ext_enable_cp);
3082 	} else {
3083 		struct hci_cp_le_set_scan_enable cp;
3084 
3085 		memset(&cp, 0, sizeof(cp));
3086 		cp.enable = LE_SCAN_ENABLE;
3087 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3088 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3089 	}
3090 
3091 	return 0;
3092 }
3093 
3094 static void le_scan_restart_work(struct work_struct *work)
3095 {
3096 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3097 					    le_scan_restart.work);
3098 	unsigned long timeout, duration, scan_start, now;
3099 	u8 status;
3100 
3101 	bt_dev_dbg(hdev, "");
3102 
3103 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3104 	if (status) {
3105 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
3106 			   status);
3107 		return;
3108 	}
3109 
3110 	hci_dev_lock(hdev);
3111 
3112 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3113 	    !hdev->discovery.scan_start)
3114 		goto unlock;
3115 
3116 	/* When the scan was started, hdev->le_scan_disable was queued to run
3117 	 * 'duration' after scan_start. During scan restart this job has been
3118 	 * canceled, and we need to queue it again with the proper timeout to
3119 	 * make sure that the scan does not run indefinitely.
3120 	 */
3121 	duration = hdev->discovery.scan_duration;
3122 	scan_start = hdev->discovery.scan_start;
3123 	now = jiffies;
3124 	if (now - scan_start <= duration) {
3125 		int elapsed;
3126 
3127 		if (now >= scan_start)
3128 			elapsed = now - scan_start;
3129 		else
3130 			elapsed = ULONG_MAX - scan_start + now;
3131 
3132 		timeout = duration - elapsed;
3133 	} else {
3134 		timeout = 0;
3135 	}
3136 
3137 	queue_delayed_work(hdev->req_workqueue,
3138 			   &hdev->le_scan_disable, timeout);
3139 
3140 unlock:
3141 	hci_dev_unlock(hdev);
3142 }
3143 
3144 static int active_scan(struct hci_request *req, unsigned long opt)
3145 {
3146 	uint16_t interval = opt;
3147 	struct hci_dev *hdev = req->hdev;
3148 	u8 own_addr_type;
3149 	/* Accept list is not used for discovery */
3150 	u8 filter_policy = 0x00;
3151 	/* Default is to enable duplicates filter */
3152 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3153 	/* Discovery doesn't require controller address resolution */
3154 	bool addr_resolv = false;
3155 	int err;
3156 
3157 	bt_dev_dbg(hdev, "");
3158 
3159 	/* If controller is scanning, it means the background scanning is
3160 	 * running. Thus, we should temporarily stop it in order to set the
3161 	 * discovery scanning parameters.
3162 	 */
3163 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3164 		hci_req_add_le_scan_disable(req, false);
3165 		cancel_interleave_scan(hdev);
3166 	}
3167 
3168 	/* All active scans will be done with either a resolvable private
3169 	 * address (when privacy feature has been enabled) or non-resolvable
3170 	 * private address.
3171 	 */
3172 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3173 					&own_addr_type);
3174 	if (err < 0)
3175 		own_addr_type = ADDR_LE_DEV_PUBLIC;
3176 
3177 	hci_dev_lock(hdev);
3178 	if (hci_is_adv_monitoring(hdev)) {
3179 		/* The duplicate filter should be disabled when an advertisement
3180 		 * monitor is active, otherwise AdvMon can only receive one
3181 		 * advertisement per peer(*) during active scanning, and
3182 		 * might report loss to these peers.
3183 		 *
3184 		 * Note that different controllers have different meanings of
3185 		 * |duplicate|. Some of them consider packets with the same
3186 		 * address as duplicate, and others consider packets with the
3187 		 * same address and the same RSSI as duplicate. Although the
3188 		 * duplicate filter doesn't need to be disabled in the latter
3189 		 * case, active scanning is usually done only for a short
3190 		 * period of time, so the power impact should be negligible.
3191 		 */
3192 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3193 	}
3194 	hci_dev_unlock(hdev);
3195 
3196 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3197 			   hdev->le_scan_window_discovery, own_addr_type,
3198 			   filter_policy, filter_dup, addr_resolv);
3199 	return 0;
3200 }
3201 
3202 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3203 {
3204 	int err;
3205 
3206 	bt_dev_dbg(req->hdev, "");
3207 
3208 	err = active_scan(req, opt);
3209 	if (err)
3210 		return err;
3211 
3212 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3213 }
3214 
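/* Start discovery according to the configured discovery type: BR/EDR
 * inquiry, LE active scan, or interleaved discovery. For LE based
 * discovery the le_scan_disable work is scheduled to stop scanning once
 * the timeout expires.
 */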
3215 static void start_discovery(struct hci_dev *hdev, u8 *status)
3216 {
3217 	unsigned long timeout;
3218 
3219 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3220 
3221 	switch (hdev->discovery.type) {
3222 	case DISCOV_TYPE_BREDR:
3223 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3224 			hci_req_sync(hdev, bredr_inquiry,
3225 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3226 				     status);
3227 		return;
3228 	case DISCOV_TYPE_INTERLEAVED:
3229 		/* When running simultaneous discovery, the LE scanning time
3230 		 * should occupy the whole discovery time since BR/EDR inquiry
3231 		 * and LE scanning are scheduled by the controller.
3232 		 *
3233 		 * For interleaving discovery in comparison, BR/EDR inquiry
3234 		 * and LE scanning are done sequentially with separate
3235 		 * timeouts.
3236 		 */
3237 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3238 			     &hdev->quirks)) {
3239 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3240 			/* During simultaneous discovery, we double LE scan
3241 			 * interval. We must leave some time for the controller
3242 			 * to do BR/EDR inquiry.
3243 			 */
3244 			hci_req_sync(hdev, interleaved_discov,
3245 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3246 				     status);
3247 			break;
3248 		}
3249 
3250 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3251 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3252 			     HCI_CMD_TIMEOUT, status);
3253 		break;
3254 	case DISCOV_TYPE_LE:
3255 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3256 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3257 			     HCI_CMD_TIMEOUT, status);
3258 		break;
3259 	default:
3260 		*status = HCI_ERROR_UNSPECIFIED;
3261 		return;
3262 	}
3263 
3264 	if (*status)
3265 		return;
3266 
3267 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3268 
3269 	/* When service discovery is used and the controller has a
3270 	 * strict duplicate filter, it is important to remember the
3271 	 * start and duration of the scan. This is required for
3272 	 * restarting scanning during the discovery phase.
3273 	 */
3274 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3275 		     hdev->discovery.result_filtering) {
3276 		hdev->discovery.scan_start = jiffies;
3277 		hdev->discovery.scan_duration = timeout;
3278 	}
3279 
3280 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3281 			   timeout);
3282 }
3283 
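/* Stop an ongoing discovery: cancel the inquiry and/or disable LE
 * scanning, and cancel a pending remote name request when name resolution
 * is in progress. Returns true if any HCI command was queued.
 */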
3284 bool hci_req_stop_discovery(struct hci_request *req)
3285 {
3286 	struct hci_dev *hdev = req->hdev;
3287 	struct discovery_state *d = &hdev->discovery;
3288 	struct hci_cp_remote_name_req_cancel cp;
3289 	struct inquiry_entry *e;
3290 	bool ret = false;
3291 
3292 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3293 
3294 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3295 		if (test_bit(HCI_INQUIRY, &hdev->flags))
3296 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3297 
3298 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3299 			cancel_delayed_work(&hdev->le_scan_disable);
3300 			cancel_delayed_work(&hdev->le_scan_restart);
3301 			hci_req_add_le_scan_disable(req, false);
3302 		}
3303 
3304 		ret = true;
3305 	} else {
3306 		/* Passive scanning */
3307 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3308 			hci_req_add_le_scan_disable(req, false);
3309 			ret = true;
3310 		}
3311 	}
3312 
3313 	/* No further actions needed for LE-only discovery */
3314 	if (d->type == DISCOV_TYPE_LE)
3315 		return ret;
3316 
3317 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3318 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3319 						     NAME_PENDING);
3320 		if (!e)
3321 			return ret;
3322 
3323 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3324 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3325 			    &cp);
3326 		ret = true;
3327 	}
3328 
3329 	return ret;
3330 }
3331 
3332 static int stop_discovery(struct hci_request *req, unsigned long opt)
3333 {
3334 	hci_dev_lock(req->hdev);
3335 	hci_req_stop_discovery(req);
3336 	hci_dev_unlock(req->hdev);
3337 
3338 	return 0;
3339 }
3340 
3341 static void discov_update(struct work_struct *work)
3342 {
3343 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3344 					    discov_update);
3345 	u8 status = 0;
3346 
3347 	switch (hdev->discovery.state) {
3348 	case DISCOVERY_STARTING:
3349 		start_discovery(hdev, &status);
3350 		mgmt_start_discovery_complete(hdev, status);
3351 		if (status)
3352 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3353 		else
3354 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3355 		break;
3356 	case DISCOVERY_STOPPING:
3357 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3358 		mgmt_stop_discovery_complete(hdev, status);
3359 		if (!status)
3360 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3361 		break;
3362 	case DISCOVERY_STOPPED:
3363 	default:
3364 		return;
3365 	}
3366 }
3367 
3368 static void discov_off(struct work_struct *work)
3369 {
3370 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3371 					    discov_off.work);
3372 
3373 	bt_dev_dbg(hdev, "");
3374 
3375 	hci_dev_lock(hdev);
3376 
3377 	/* When discoverable timeout triggers, then just make sure
3378 	 * the limited discoverable flag is cleared. Even in the case
3379 	 * of a timeout triggered from general discoverable, it is
3380 	 * safe to unconditionally clear the flag.
3381 	 */
3382 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3383 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3384 	hdev->discov_timeout = 0;
3385 
3386 	hci_dev_unlock(hdev);
3387 
3388 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3389 	mgmt_new_settings(hdev);
3390 }
3391 
3392 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3393 {
3394 	struct hci_dev *hdev = req->hdev;
3395 	u8 link_sec;
3396 
3397 	hci_dev_lock(hdev);
3398 
3399 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3400 	    !lmp_host_ssp_capable(hdev)) {
3401 		u8 mode = 0x01;
3402 
3403 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3404 
3405 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3406 			u8 support = 0x01;
3407 
3408 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3409 				    sizeof(support), &support);
3410 		}
3411 	}
3412 
3413 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3414 	    lmp_bredr_capable(hdev)) {
3415 		struct hci_cp_write_le_host_supported cp;
3416 
3417 		cp.le = 0x01;
3418 		cp.simul = 0x00;
3419 
3420 		/* Check first if we already have the right
3421 		 * host state (host features set)
3422 		 */
3423 		if (cp.le != lmp_host_le_capable(hdev) ||
3424 		    cp.simul != lmp_host_le_br_capable(hdev))
3425 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3426 				    sizeof(cp), &cp);
3427 	}
3428 
3429 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3430 		/* Make sure the controller has a good default for
3431 		 * advertising data. This also applies to the case
3432 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3433 		 */
3434 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3435 		    list_empty(&hdev->adv_instances)) {
3436 			int err;
3437 
3438 			if (ext_adv_capable(hdev)) {
3439 				err = __hci_req_setup_ext_adv_instance(req,
3440 								       0x00);
3441 				if (!err)
3442 					__hci_req_update_scan_rsp_data(req,
3443 								       0x00);
3444 			} else {
3445 				err = 0;
3446 				__hci_req_update_adv_data(req, 0x00);
3447 				__hci_req_update_scan_rsp_data(req, 0x00);
3448 			}
3449 
3450 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3451 				if (!ext_adv_capable(hdev))
3452 					__hci_req_enable_advertising(req);
3453 				else if (!err)
3454 					__hci_req_enable_ext_advertising(req,
3455 									 0x00);
3456 			}
3457 		} else if (!list_empty(&hdev->adv_instances)) {
3458 			struct adv_info *adv_instance;
3459 
3460 			adv_instance = list_first_entry(&hdev->adv_instances,
3461 							struct adv_info, list);
3462 			__hci_req_schedule_adv_instance(req,
3463 							adv_instance->instance,
3464 							true);
3465 		}
3466 	}
3467 
3468 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3469 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3470 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3471 			    sizeof(link_sec), &link_sec);
3472 
3473 	if (lmp_bredr_capable(hdev)) {
3474 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3475 			__hci_req_write_fast_connectable(req, true);
3476 		else
3477 			__hci_req_write_fast_connectable(req, false);
3478 		__hci_req_update_scan(req);
3479 		__hci_req_update_class(req);
3480 		__hci_req_update_name(req);
3481 		__hci_req_update_eir(req);
3482 	}
3483 
3484 	hci_dev_unlock(hdev);
3485 	return 0;
3486 }
3487 
3488 int __hci_req_hci_power_on(struct hci_dev *hdev)
3489 {
3490 	/* Register the available SMP channels (BR/EDR and LE) only when
3491 	 * successfully powering on the controller. This late
3492 	 * registration is required so that LE SMP can clearly decide if
3493 	 * the public address or static address is used.
3494 	 */
3495 	smp_register(hdev);
3496 
3497 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3498 			      NULL);
3499 }
3500 
3501 void hci_request_setup(struct hci_dev *hdev)
3502 {
3503 	INIT_WORK(&hdev->discov_update, discov_update);
3504 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3505 	INIT_WORK(&hdev->scan_update, scan_update_work);
3506 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3507 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3508 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3509 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3510 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3511 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3512 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3513 }
3514 
3515 void hci_request_cancel_all(struct hci_dev *hdev)
3516 {
3517 	hci_req_sync_cancel(hdev, ENODEV);
3518 
3519 	cancel_work_sync(&hdev->discov_update);
3520 	cancel_work_sync(&hdev->bg_scan_update);
3521 	cancel_work_sync(&hdev->scan_update);
3522 	cancel_work_sync(&hdev->connectable_update);
3523 	cancel_work_sync(&hdev->discoverable_update);
3524 	cancel_delayed_work_sync(&hdev->discov_off);
3525 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3526 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3527 
3528 	if (hdev->adv_instance_timeout) {
3529 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3530 		hdev->adv_instance_timeout = 0;
3531 	}
3532 
3533 	cancel_interleave_scan(hdev);
3534 }
3535