1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 
33 #define HCI_REQ_DONE	  0
34 #define HCI_REQ_PEND	  1
35 #define HCI_REQ_CANCELED  2
36 
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 	skb_queue_head_init(&req->cmd_q);
40 	req->hdev = hdev;
41 	req->err = 0;
42 }
43 
44 void hci_req_purge(struct hci_request *req)
45 {
46 	skb_queue_purge(&req->cmd_q);
47 }
48 
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 	return hdev->req_status == HCI_REQ_PEND;
52 }
53 
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 		   hci_req_complete_skb_t complete_skb)
56 {
57 	struct hci_dev *hdev = req->hdev;
58 	struct sk_buff *skb;
59 	unsigned long flags;
60 
61 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62 
63 	/* If an error occurred during request building, remove all HCI
64 	 * commands queued on the HCI request queue.
65 	 */
66 	if (req->err) {
67 		skb_queue_purge(&req->cmd_q);
68 		return req->err;
69 	}
70 
71 	/* Do not allow empty requests */
72 	if (skb_queue_empty(&req->cmd_q))
73 		return -ENODATA;
74 
75 	skb = skb_peek_tail(&req->cmd_q);
76 	if (complete) {
77 		bt_cb(skb)->hci.req_complete = complete;
78 	} else if (complete_skb) {
79 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 	}
82 
83 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86 
87 	queue_work(hdev->workqueue, &hdev->cmd_work);
88 
89 	return 0;
90 }
91 
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 	return req_run(req, complete, NULL);
95 }
96 
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 	return req_run(req, NULL, complete);
100 }
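
/* Illustrative usage sketch (editorial note, not part of the original
 * source): a caller builds a request on the stack, queues one or more
 * commands and then runs it asynchronously with an optional completion
 * callback. The callback name below is hypothetical.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x",
 *			   opcode, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_run(&req, example_complete);
 */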
101 
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 				  struct sk_buff *skb)
104 {
105 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		if (skb)
111 			hdev->req_skb = skb_get(skb);
112 		wake_up_interruptible(&hdev->req_wait_q);
113 	}
114 }
115 
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
119 
120 	if (hdev->req_status == HCI_REQ_PEND) {
121 		hdev->req_result = err;
122 		hdev->req_status = HCI_REQ_CANCELED;
123 		wake_up_interruptible(&hdev->req_wait_q);
124 	}
125 }
126 
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 				  const void *param, u8 event, u32 timeout)
129 {
130 	struct hci_request req;
131 	struct sk_buff *skb;
132 	int err = 0;
133 
134 	BT_DBG("%s", hdev->name);
135 
136 	hci_req_init(&req, hdev);
137 
138 	hci_req_add_ev(&req, opcode, plen, param, event);
139 
140 	hdev->req_status = HCI_REQ_PEND;
141 
142 	err = hci_req_run_skb(&req, hci_req_sync_complete);
143 	if (err < 0)
144 		return ERR_PTR(err);
145 
146 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 			hdev->req_status != HCI_REQ_PEND, timeout);
148 
149 	if (err == -ERESTARTSYS)
150 		return ERR_PTR(-EINTR);
151 
152 	switch (hdev->req_status) {
153 	case HCI_REQ_DONE:
154 		err = -bt_to_errno(hdev->req_result);
155 		break;
156 
157 	case HCI_REQ_CANCELED:
158 		err = -hdev->req_result;
159 		break;
160 
161 	default:
162 		err = -ETIMEDOUT;
163 		break;
164 	}
165 
166 	hdev->req_status = hdev->req_result = 0;
167 	skb = hdev->req_skb;
168 	hdev->req_skb = NULL;
169 
170 	BT_DBG("%s end: err %d", hdev->name, err);
171 
172 	if (err < 0) {
173 		kfree_skb(skb);
174 		return ERR_PTR(err);
175 	}
176 
177 	if (!skb)
178 		return ERR_PTR(-ENODATA);
179 
180 	return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183 
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 			       const void *param, u32 timeout)
186 {
187 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
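
/* Illustrative sketch (editorial note, not part of the original source):
 * __hci_cmd_sync() sends a single command and blocks until the matching
 * completion event arrives, returning the event skb or an ERR_PTR() on
 * failure. HCI_CMD_TIMEOUT is the timeout typically used by callers.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * The command response parameters start at skb->data; the caller is
 * responsible for freeing the skb with kfree_skb() when done.
 */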
190 
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 						     unsigned long opt),
194 		   unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 	struct hci_request req;
197 	int err = 0;
198 
199 	BT_DBG("%s start", hdev->name);
200 
201 	hci_req_init(&req, hdev);
202 
203 	hdev->req_status = HCI_REQ_PEND;
204 
205 	err = func(&req, opt);
206 	if (err) {
207 		if (hci_status)
208 			*hci_status = HCI_ERROR_UNSPECIFIED;
209 		return err;
210 	}
211 
212 	err = hci_req_run_skb(&req, hci_req_sync_complete);
213 	if (err < 0) {
214 		hdev->req_status = 0;
215 
216 		/* ENODATA means the HCI request command queue is empty.
217 		 * This can happen when a request with conditionals doesn't
218 		 * trigger any commands to be sent. This is normal behavior
219 		 * and should not trigger an error return.
220 		 */
221 		if (err == -ENODATA) {
222 			if (hci_status)
223 				*hci_status = 0;
224 			return 0;
225 		}
226 
227 		if (hci_status)
228 			*hci_status = HCI_ERROR_UNSPECIFIED;
229 
230 		return err;
231 	}
232 
233 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 			hdev->req_status != HCI_REQ_PEND, timeout);
235 
236 	if (err == -ERESTARTSYS)
237 		return -EINTR;
238 
239 	switch (hdev->req_status) {
240 	case HCI_REQ_DONE:
241 		err = -bt_to_errno(hdev->req_result);
242 		if (hci_status)
243 			*hci_status = hdev->req_result;
244 		break;
245 
246 	case HCI_REQ_CANCELED:
247 		err = -hdev->req_result;
248 		if (hci_status)
249 			*hci_status = HCI_ERROR_UNSPECIFIED;
250 		break;
251 
252 	default:
253 		err = -ETIMEDOUT;
254 		if (hci_status)
255 			*hci_status = HCI_ERROR_UNSPECIFIED;
256 		break;
257 	}
258 
259 	kfree_skb(hdev->req_skb);
260 	hdev->req_skb = NULL;
261 	hdev->req_status = hdev->req_result = 0;
262 
263 	BT_DBG("%s end: err %d", hdev->name, err);
264 
265 	return err;
266 }
267 
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 						  unsigned long opt),
270 		 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 	int ret;
273 
274 	/* Serialize all requests */
275 	hci_req_sync_lock(hdev);
276 	/* Check the state after obtaining the lock to protect the HCI_UP
277 	 * against any races from hci_dev_do_close when the controller
278 	 * gets removed.
279 	 */
280 	if (test_bit(HCI_UP, &hdev->flags))
281 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 	else
283 		ret = -ENETDOWN;
284 	hci_req_sync_unlock(hdev);
285 
286 	return ret;
287 }
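
/* Illustrative sketch (editorial note, not part of the original source):
 * the builder callback passed to hci_req_sync() only queues commands;
 * hci_req_sync() handles locking, running the request and waiting for
 * completion. The function below is hypothetical.
 *
 *	static int example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   NULL);
 */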
288 
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 				const void *param)
291 {
292 	int len = HCI_COMMAND_HDR_SIZE + plen;
293 	struct hci_command_hdr *hdr;
294 	struct sk_buff *skb;
295 
296 	skb = bt_skb_alloc(len, GFP_ATOMIC);
297 	if (!skb)
298 		return NULL;
299 
300 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 	hdr->opcode = cpu_to_le16(opcode);
302 	hdr->plen   = plen;
303 
304 	if (plen)
305 		skb_put_data(skb, param, plen);
306 
307 	BT_DBG("skb len %d", skb->len);
308 
309 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 	hci_skb_opcode(skb) = opcode;
311 
312 	return skb;
313 }
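
/* Editorial note (not part of the original source): the skb returned by
 * hci_prepare_cmd() carries the HCI command packet as it goes on the
 * wire, i.e. a 3 byte header followed by the parameters:
 *
 *	bytes 0-1: opcode (little endian, OGF/OCF packed)
 *	byte  2:   parameter total length (plen)
 *	bytes 3-:  plen bytes of command parameters
 */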
314 
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 		    const void *param, u8 event)
318 {
319 	struct hci_dev *hdev = req->hdev;
320 	struct sk_buff *skb;
321 
322 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323 
324 	/* If an error occurred during request building, there is no point in
325 	 * queueing the HCI command. We can simply return.
326 	 */
327 	if (req->err)
328 		return;
329 
330 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 	if (!skb) {
332 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 			   opcode);
334 		req->err = -ENOMEM;
335 		return;
336 	}
337 
338 	if (skb_queue_empty(&req->cmd_q))
339 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340 
341 	bt_cb(skb)->hci.req_event = event;
342 
343 	skb_queue_tail(&req->cmd_q, skb);
344 }
345 
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 		 const void *param)
348 {
349 	hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351 
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 	struct hci_dev *hdev = req->hdev;
355 	struct hci_cp_write_page_scan_activity acp;
356 	u8 type;
357 
358 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 		return;
360 
361 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 		return;
363 
364 	if (enable) {
365 		type = PAGE_SCAN_TYPE_INTERLACED;
366 
367 		/* 160 msec page scan interval */
368 		acp.interval = cpu_to_le16(0x0100);
369 	} else {
370 		type = hdev->def_page_scan_type;
371 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
372 	}
373 
374 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
375 
376 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
377 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
378 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
379 			    sizeof(acp), &acp);
380 
381 	if (hdev->page_scan_type != type)
382 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
383 }
384 
385 /* This function controls the background scanning based on hdev->pend_le_conns
386  * list. If there are pending LE connections we start the background scanning,
387  * otherwise we stop it.
388  *
389  * This function requires the caller holds hdev->lock.
390  */
391 static void __hci_update_background_scan(struct hci_request *req)
392 {
393 	struct hci_dev *hdev = req->hdev;
394 
395 	if (!test_bit(HCI_UP, &hdev->flags) ||
396 	    test_bit(HCI_INIT, &hdev->flags) ||
397 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
398 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
399 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
400 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
401 		return;
402 
403 	/* No point in doing scanning if LE support hasn't been enabled */
404 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
405 		return;
406 
407 	/* If discovery is active don't interfere with it */
408 	if (hdev->discovery.state != DISCOVERY_STOPPED)
409 		return;
410 
411 	/* Reset RSSI and UUID filters when starting background scanning
412 	 * since these filters are meant for service discovery only.
413 	 *
414 	 * The Start Discovery and Start Service Discovery operations
415 	 * always set proper values for the RSSI threshold and UUID
416 	 * filter list. So it is safe to just reset them here.
417 	 */
418 	hci_discovery_filter_clear(hdev);
419 
420 	BT_DBG("%s ADV monitoring is %s", hdev->name,
421 	       hci_is_adv_monitoring(hdev) ? "on" : "off");
422 
423 	if (list_empty(&hdev->pend_le_conns) &&
424 	    list_empty(&hdev->pend_le_reports) &&
425 	    !hci_is_adv_monitoring(hdev)) {
426 		/* If there are no pending LE connections or devices
427 		 * to be scanned for or no ADV monitors, we should stop the
428 		 * background scanning.
429 		 */
430 
431 		/* If controller is not scanning we are done. */
432 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 			return;
434 
435 		hci_req_add_le_scan_disable(req, false);
436 
437 		BT_DBG("%s stopping background scanning", hdev->name);
438 	} else {
439 		/* If there is at least one pending LE connection, we should
440 		 * keep the background scan running.
441 		 */
442 
443 		/* If controller is connecting, we should not start scanning
444 		 * since some controllers are not able to scan and connect at
445 		 * the same time.
446 		 */
447 		if (hci_lookup_le_connect(hdev))
448 			return;
449 
450 		/* If controller is currently scanning, we stop it to ensure we
451 		 * don't miss any advertising (due to duplicates filter).
452 		 */
453 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
454 			hci_req_add_le_scan_disable(req, false);
455 
456 		hci_req_add_le_passive_scan(req);
457 
458 		BT_DBG("%s starting background scanning", hdev->name);
459 	}
460 }
461 
462 void __hci_req_update_name(struct hci_request *req)
463 {
464 	struct hci_dev *hdev = req->hdev;
465 	struct hci_cp_write_local_name cp;
466 
467 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
468 
469 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 }
471 
472 #define PNP_INFO_SVCLASS_ID		0x1200
473 
474 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 {
476 	u8 *ptr = data, *uuids_start = NULL;
477 	struct bt_uuid *uuid;
478 
479 	if (len < 4)
480 		return ptr;
481 
482 	list_for_each_entry(uuid, &hdev->uuids, list) {
483 		u16 uuid16;
484 
485 		if (uuid->size != 16)
486 			continue;
487 
488 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
489 		if (uuid16 < 0x1100)
490 			continue;
491 
492 		if (uuid16 == PNP_INFO_SVCLASS_ID)
493 			continue;
494 
495 		if (!uuids_start) {
496 			uuids_start = ptr;
497 			uuids_start[0] = 1;
498 			uuids_start[1] = EIR_UUID16_ALL;
499 			ptr += 2;
500 		}
501 
502 		/* Stop if not enough space to put next UUID */
503 		if ((ptr - data) + sizeof(u16) > len) {
504 			uuids_start[1] = EIR_UUID16_SOME;
505 			break;
506 		}
507 
508 		*ptr++ = (uuid16 & 0x00ff);
509 		*ptr++ = (uuid16 & 0xff00) >> 8;
510 		uuids_start[0] += sizeof(uuid16);
511 	}
512 
513 	return ptr;
514 }
515 
516 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
517 {
518 	u8 *ptr = data, *uuids_start = NULL;
519 	struct bt_uuid *uuid;
520 
521 	if (len < 6)
522 		return ptr;
523 
524 	list_for_each_entry(uuid, &hdev->uuids, list) {
525 		if (uuid->size != 32)
526 			continue;
527 
528 		if (!uuids_start) {
529 			uuids_start = ptr;
530 			uuids_start[0] = 1;
531 			uuids_start[1] = EIR_UUID32_ALL;
532 			ptr += 2;
533 		}
534 
535 		/* Stop if not enough space to put next UUID */
536 		if ((ptr - data) + sizeof(u32) > len) {
537 			uuids_start[1] = EIR_UUID32_SOME;
538 			break;
539 		}
540 
541 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
542 		ptr += sizeof(u32);
543 		uuids_start[0] += sizeof(u32);
544 	}
545 
546 	return ptr;
547 }
548 
549 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
550 {
551 	u8 *ptr = data, *uuids_start = NULL;
552 	struct bt_uuid *uuid;
553 
554 	if (len < 18)
555 		return ptr;
556 
557 	list_for_each_entry(uuid, &hdev->uuids, list) {
558 		if (uuid->size != 128)
559 			continue;
560 
561 		if (!uuids_start) {
562 			uuids_start = ptr;
563 			uuids_start[0] = 1;
564 			uuids_start[1] = EIR_UUID128_ALL;
565 			ptr += 2;
566 		}
567 
568 		/* Stop if not enough space to put next UUID */
569 		if ((ptr - data) + 16 > len) {
570 			uuids_start[1] = EIR_UUID128_SOME;
571 			break;
572 		}
573 
574 		memcpy(ptr, uuid->uuid, 16);
575 		ptr += 16;
576 		uuids_start[0] += 16;
577 	}
578 
579 	return ptr;
580 }
581 
582 static void create_eir(struct hci_dev *hdev, u8 *data)
583 {
584 	u8 *ptr = data;
585 	size_t name_len;
586 
587 	name_len = strlen(hdev->dev_name);
588 
589 	if (name_len > 0) {
590 		/* EIR Data type */
591 		if (name_len > 48) {
592 			name_len = 48;
593 			ptr[1] = EIR_NAME_SHORT;
594 		} else
595 			ptr[1] = EIR_NAME_COMPLETE;
596 
597 		/* EIR Data length */
598 		ptr[0] = name_len + 1;
599 
600 		memcpy(ptr + 2, hdev->dev_name, name_len);
601 
602 		ptr += (name_len + 2);
603 	}
604 
605 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
606 		ptr[0] = 2;
607 		ptr[1] = EIR_TX_POWER;
608 		ptr[2] = (u8) hdev->inq_tx_power;
609 
610 		ptr += 3;
611 	}
612 
613 	if (hdev->devid_source > 0) {
614 		ptr[0] = 9;
615 		ptr[1] = EIR_DEVICE_ID;
616 
617 		put_unaligned_le16(hdev->devid_source, ptr + 2);
618 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
619 		put_unaligned_le16(hdev->devid_product, ptr + 6);
620 		put_unaligned_le16(hdev->devid_version, ptr + 8);
621 
622 		ptr += 10;
623 	}
624 
625 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
627 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628 }
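
/* Editorial note (not part of the original source): each EIR field
 * written by create_eir() and its helpers follows the generic AD
 * structure used throughout this file:
 *
 *	byte  0:  length of the field excluding this byte (type + data)
 *	byte  1:  data type (e.g. EIR_NAME_COMPLETE, EIR_TX_POWER)
 *	bytes 2-: the data itself
 *
 * For example, a Tx Power field of 4 dBm is encoded as the three bytes
 * 0x02, 0x0a (EIR_TX_POWER), 0x04.
 */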
629 
630 void __hci_req_update_eir(struct hci_request *req)
631 {
632 	struct hci_dev *hdev = req->hdev;
633 	struct hci_cp_write_eir cp;
634 
635 	if (!hdev_is_powered(hdev))
636 		return;
637 
638 	if (!lmp_ext_inq_capable(hdev))
639 		return;
640 
641 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 		return;
643 
644 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 		return;
646 
647 	memset(&cp, 0, sizeof(cp));
648 
649 	create_eir(hdev, cp.data);
650 
651 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 		return;
653 
654 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
655 
656 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657 }
658 
659 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
660 {
661 	struct hci_dev *hdev = req->hdev;
662 
663 	if (hdev->scanning_paused) {
664 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
665 		return;
666 	}
667 
668 	if (use_ext_scan(hdev)) {
669 		struct hci_cp_le_set_ext_scan_enable cp;
670 
671 		memset(&cp, 0, sizeof(cp));
672 		cp.enable = LE_SCAN_DISABLE;
673 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
674 			    &cp);
675 	} else {
676 		struct hci_cp_le_set_scan_enable cp;
677 
678 		memset(&cp, 0, sizeof(cp));
679 		cp.enable = LE_SCAN_DISABLE;
680 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
681 	}
682 
683 	/* Disable address resolution */
684 	if (use_ll_privacy(hdev) &&
685 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
686 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
687 		__u8 enable = 0x00;
688 
689 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
690 	}
691 }
692 
693 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
694 				u8 bdaddr_type)
695 {
696 	struct hci_cp_le_del_from_white_list cp;
697 
698 	cp.bdaddr_type = bdaddr_type;
699 	bacpy(&cp.bdaddr, bdaddr);
700 
701 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
702 		   cp.bdaddr_type);
703 	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
704 
705 	if (use_ll_privacy(req->hdev) &&
706 	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
707 		struct smp_irk *irk;
708 
709 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
710 		if (irk) {
711 			struct hci_cp_le_del_from_resolv_list cp;
712 
713 			cp.bdaddr_type = bdaddr_type;
714 			bacpy(&cp.bdaddr, bdaddr);
715 
716 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
717 				    sizeof(cp), &cp);
718 		}
719 	}
720 }
721 
722 /* Adds connection to white list if needed. On error, returns -1. */
723 static int add_to_white_list(struct hci_request *req,
724 			     struct hci_conn_params *params, u8 *num_entries,
725 			     bool allow_rpa)
726 {
727 	struct hci_cp_le_add_to_white_list cp;
728 	struct hci_dev *hdev = req->hdev;
729 
730 	/* Already in white list */
731 	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
732 				   params->addr_type))
733 		return 0;
734 
735 	/* Select filter policy to accept all advertising */
736 	if (*num_entries >= hdev->le_white_list_size)
737 		return -1;
738 
739 	/* White list can not be used with RPAs */
740 	if (!allow_rpa &&
741 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
742 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
743 		return -1;
744 	}
745 
746 	/* During suspend, only wakeable devices can be in the whitelist */
747 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
748 						   params->current_flags))
749 		return 0;
750 
751 	*num_entries += 1;
752 	cp.bdaddr_type = params->addr_type;
753 	bacpy(&cp.bdaddr, &params->addr);
754 
755 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
756 		   cp.bdaddr_type);
757 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
758 
759 	if (use_ll_privacy(hdev) &&
760 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
761 		struct smp_irk *irk;
762 
763 		irk = hci_find_irk_by_addr(hdev, &params->addr,
764 					   params->addr_type);
765 		if (irk) {
766 			struct hci_cp_le_add_to_resolv_list cp;
767 
768 			cp.bdaddr_type = params->addr_type;
769 			bacpy(&cp.bdaddr, &params->addr);
770 			memcpy(cp.peer_irk, irk->val, 16);
771 
772 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
773 				memcpy(cp.local_irk, hdev->irk, 16);
774 			else
775 				memset(cp.local_irk, 0, 16);
776 
777 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
778 				    sizeof(cp), &cp);
779 		}
780 	}
781 
782 	return 0;
783 }
784 
785 static u8 update_white_list(struct hci_request *req)
786 {
787 	struct hci_dev *hdev = req->hdev;
788 	struct hci_conn_params *params;
789 	struct bdaddr_list *b;
790 	u8 num_entries = 0;
791 	bool pend_conn, pend_report;
792 	/* We allow whitelisting even with RPAs in suspend. In the worst case,
793 	 * we won't be able to wake from devices that use the privacy 1.2
794 	 * features. Additionally, once we support privacy 1.2 and IRK
795 	 * offloading, we can update this to also check for those conditions.
796 	 */
797 	bool allow_rpa = hdev->suspended;
798 
799 	if (use_ll_privacy(hdev) &&
800 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
801 		allow_rpa = true;
802 
803 	/* Go through the current white list programmed into the
804 	 * controller one by one and check if that address is still
805 	 * in the list of pending connections or list of devices to
806 	 * report. If not present in either list, then queue the
807 	 * command to remove it from the controller.
808 	 */
809 	list_for_each_entry(b, &hdev->le_white_list, list) {
810 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
811 						      &b->bdaddr,
812 						      b->bdaddr_type);
813 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
814 							&b->bdaddr,
815 							b->bdaddr_type);
816 
817 		/* If the device is not likely to connect or report,
818 		 * remove it from the whitelist.
819 		 */
820 		if (!pend_conn && !pend_report) {
821 			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
822 			continue;
823 		}
824 
825 		/* White list can not be used with RPAs */
826 		if (!allow_rpa &&
827 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
828 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
829 			return 0x00;
830 		}
831 
832 		num_entries++;
833 	}
834 
835 	/* Since all no longer valid white list entries have been
836 	 * removed, walk through the list of pending connections
837 	 * and ensure that any new device gets programmed into
838 	 * the controller.
839 	 *
840 	 * If the list of the devices is larger than the list of
841 	 * available white list entries in the controller, then
842 	 * just abort and return the filter policy value to not use the
843 	 * white list.
844 	 */
845 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
846 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
847 			return 0x00;
848 	}
849 
850 	/* After adding all new pending connections, walk through
851 	 * the list of pending reports and also add these to the
852 	 * white list if there is still space. Abort if space runs out.
853 	 */
854 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
855 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
856 			return 0x00;
857 	}
858 
859 	/* Once controller offloading of advertisement monitors is in place,
860 	 * this condition should also include a check for MSFT extension
861 	 * support. If suspend is ongoing, whitelist should be the default to
862 	 * prevent waking by random advertisements.
863 	 */
864 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
865 		return 0x00;
866 
867 	/* Select filter policy to use white list */
868 	return 0x01;
869 }
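
/* Editorial note (not part of the original source): the value returned
 * here is used as the LE scan filter_policy: 0x00 means accept all
 * advertisements, 0x01 means only accept advertisements from devices on
 * the white list. hci_req_add_le_passive_scan() may additionally OR in
 * 0x02 to select the extended filter policies for directed advertising.
 */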
870 
871 static bool scan_use_rpa(struct hci_dev *hdev)
872 {
873 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
874 }
875 
876 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
877 			       u16 window, u8 own_addr_type, u8 filter_policy,
878 			       bool addr_resolv)
879 {
880 	struct hci_dev *hdev = req->hdev;
881 
882 	if (hdev->scanning_paused) {
883 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
884 		return;
885 	}
886 
887 	if (use_ll_privacy(hdev) &&
888 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
889 	    addr_resolv) {
890 		u8 enable = 0x01;
891 
892 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
893 	}
894 
895 	/* Use extended scanning if both the extended scan parameters and
896 	 * extended scan enable commands are supported
897 	 */
898 	if (use_ext_scan(hdev)) {
899 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
900 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
901 		struct hci_cp_le_scan_phy_params *phy_params;
902 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
903 		u32 plen;
904 
905 		ext_param_cp = (void *)data;
906 		phy_params = (void *)ext_param_cp->data;
907 
908 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
909 		ext_param_cp->own_addr_type = own_addr_type;
910 		ext_param_cp->filter_policy = filter_policy;
911 
912 		plen = sizeof(*ext_param_cp);
913 
914 		if (scan_1m(hdev) || scan_2m(hdev)) {
915 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
916 
917 			memset(phy_params, 0, sizeof(*phy_params));
918 			phy_params->type = type;
919 			phy_params->interval = cpu_to_le16(interval);
920 			phy_params->window = cpu_to_le16(window);
921 
922 			plen += sizeof(*phy_params);
923 			phy_params++;
924 		}
925 
926 		if (scan_coded(hdev)) {
927 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
928 
929 			memset(phy_params, 0, sizeof(*phy_params));
930 			phy_params->type = type;
931 			phy_params->interval = cpu_to_le16(interval);
932 			phy_params->window = cpu_to_le16(window);
933 
934 			plen += sizeof(*phy_params);
935 			phy_params++;
936 		}
937 
938 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
939 			    plen, ext_param_cp);
940 
941 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
942 		ext_enable_cp.enable = LE_SCAN_ENABLE;
943 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
944 
945 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
946 			    sizeof(ext_enable_cp), &ext_enable_cp);
947 	} else {
948 		struct hci_cp_le_set_scan_param param_cp;
949 		struct hci_cp_le_set_scan_enable enable_cp;
950 
951 		memset(&param_cp, 0, sizeof(param_cp));
952 		param_cp.type = type;
953 		param_cp.interval = cpu_to_le16(interval);
954 		param_cp.window = cpu_to_le16(window);
955 		param_cp.own_address_type = own_addr_type;
956 		param_cp.filter_policy = filter_policy;
957 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
958 			    &param_cp);
959 
960 		memset(&enable_cp, 0, sizeof(enable_cp));
961 		enable_cp.enable = LE_SCAN_ENABLE;
962 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
963 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
964 			    &enable_cp);
965 	}
966 }
967 
968 /* Returns true if an LE connection is in the scanning state */
969 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
970 {
971 	struct hci_conn_hash *h = &hdev->conn_hash;
972 	struct hci_conn  *c;
973 
974 	rcu_read_lock();
975 
976 	list_for_each_entry_rcu(c, &h->list, list) {
977 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
978 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
979 			rcu_read_unlock();
980 			return true;
981 		}
982 	}
983 
984 	rcu_read_unlock();
985 
986 	return false;
987 }
988 
989 /* Call hci_req_add_le_scan_disable() first to disable the controller
990  * based address resolution so that the resolving list can be
991  * reconfigured.
992  */
993 void hci_req_add_le_passive_scan(struct hci_request *req)
994 {
995 	struct hci_dev *hdev = req->hdev;
996 	u8 own_addr_type;
997 	u8 filter_policy;
998 	u16 window, interval;
999 	/* Background scanning should run with address resolution */
1000 	bool addr_resolv = true;
1001 
1002 	if (hdev->scanning_paused) {
1003 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1004 		return;
1005 	}
1006 
1007 	/* Set require_privacy to false since no SCAN_REQ are sent
1008 	 * during passive scanning. Not using a non-resolvable address
1009 	 * here is important so that peer devices using direct
1010 	 * advertising with our address will be correctly reported
1011 	 * by the controller.
1012 	 */
1013 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1014 				      &own_addr_type))
1015 		return;
1016 
1017 	/* Adding or removing entries from the white list must
1018 	 * happen before enabling scanning. The controller does
1019 	 * not allow white list modification while scanning.
1020 	 */
1021 	filter_policy = update_white_list(req);
1022 
1023 	/* When the controller is using random resolvable addresses and
1024 	 * LE privacy is enabled, controllers that support the Extended
1025 	 * Scanner Filter Policies can also handle directed
1026 	 * advertising.
1027 	 *
1028 	 * So instead of using filter policies 0x00 (no whitelist)
1029 	 * and 0x01 (whitelist enabled) use the new filter policies
1030 	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1031 	 */
1032 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1033 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1034 		filter_policy |= 0x02;
1035 
1036 	if (hdev->suspended) {
1037 		window = hdev->le_scan_window_suspend;
1038 		interval = hdev->le_scan_int_suspend;
1039 	} else if (hci_is_le_conn_scanning(hdev)) {
1040 		window = hdev->le_scan_window_connect;
1041 		interval = hdev->le_scan_int_connect;
1042 	} else if (hci_is_adv_monitoring(hdev)) {
1043 		window = hdev->le_scan_window_adv_monitor;
1044 		interval = hdev->le_scan_int_adv_monitor;
1045 	} else {
1046 		window = hdev->le_scan_window;
1047 		interval = hdev->le_scan_interval;
1048 	}
1049 
1050 	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1051 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1052 			   own_addr_type, filter_policy, addr_resolv);
1053 }
1054 
1055 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1056 {
1057 	struct adv_info *adv_instance;
1058 
1059 	/* Instance 0x00 always sets the local name */
1060 	if (instance == 0x00)
1061 		return 1;
1062 
1063 	adv_instance = hci_find_adv_instance(hdev, instance);
1064 	if (!adv_instance)
1065 		return 0;
1066 
1067 	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1068 	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1069 		return 1;
1070 
1071 	return adv_instance->scan_rsp_len;
1072 }
1073 
1074 static void hci_req_clear_event_filter(struct hci_request *req)
1075 {
1076 	struct hci_cp_set_event_filter f;
1077 
1078 	memset(&f, 0, sizeof(f));
1079 	f.flt_type = HCI_FLT_CLEAR_ALL;
1080 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1081 
1082 	/* Update page scan state (since we may have modified it when setting
1083 	 * the event filter).
1084 	 */
1085 	__hci_req_update_scan(req);
1086 }
1087 
1088 static void hci_req_set_event_filter(struct hci_request *req)
1089 {
1090 	struct bdaddr_list_with_flags *b;
1091 	struct hci_cp_set_event_filter f;
1092 	struct hci_dev *hdev = req->hdev;
1093 	u8 scan = SCAN_DISABLED;
1094 
1095 	/* Always clear event filter when starting */
1096 	hci_req_clear_event_filter(req);
1097 
1098 	list_for_each_entry(b, &hdev->whitelist, list) {
1099 		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1100 					b->current_flags))
1101 			continue;
1102 
1103 		memset(&f, 0, sizeof(f));
1104 		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1105 		f.flt_type = HCI_FLT_CONN_SETUP;
1106 		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1107 		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1108 
1109 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1110 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1111 		scan = SCAN_PAGE;
1112 	}
1113 
1114 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1115 }
1116 
1117 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1118 {
1119 	/* Before changing params disable scan if enabled */
1120 	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1121 		hci_req_add_le_scan_disable(req, false);
1122 
1123 	/* Configure params and enable scanning */
1124 	hci_req_add_le_passive_scan(req);
1125 
1126 	/* Block suspend notifier on response */
1127 	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1128 }
1129 
1130 static void cancel_adv_timeout(struct hci_dev *hdev)
1131 {
1132 	if (hdev->adv_instance_timeout) {
1133 		hdev->adv_instance_timeout = 0;
1134 		cancel_delayed_work(&hdev->adv_instance_expire);
1135 	}
1136 }
1137 
1138 /* This function requires the caller holds hdev->lock */
1139 static void hci_suspend_adv_instances(struct hci_request *req)
1140 {
1141 	bt_dev_dbg(req->hdev, "Suspending advertising instances");
1142 
1143 	/* Call to disable any advertisements active on the controller.
1144 	 * This will succeed even if no advertisements are configured.
1145 	 */
1146 	__hci_req_disable_advertising(req);
1147 
1148 	/* If we are using software rotation, pause the loop */
1149 	if (!ext_adv_capable(req->hdev))
1150 		cancel_adv_timeout(req->hdev);
1151 }
1152 
1153 /* This function requires the caller holds hdev->lock */
1154 static void hci_resume_adv_instances(struct hci_request *req)
1155 {
1156 	struct adv_info *adv;
1157 
1158 	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1159 
1160 	if (ext_adv_capable(req->hdev)) {
1161 		/* Call for each tracked instance to be re-enabled */
1162 		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1163 			__hci_req_enable_ext_advertising(req,
1164 							 adv->instance);
1165 		}
1166 
1167 	} else {
1168 		/* Schedule for most recent instance to be restarted and begin
1169 		 * the software rotation loop
1170 		 */
1171 		__hci_req_schedule_adv_instance(req,
1172 						req->hdev->cur_adv_instance,
1173 						true);
1174 	}
1175 }
1176 
1177 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1178 {
1179 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1180 		   status);
1181 	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1182 	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1183 		wake_up(&hdev->suspend_wait_q);
1184 	}
1185 }
1186 
1187 /* Call with hci_dev_lock */
1188 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1189 {
1190 	int old_state;
1191 	struct hci_conn *conn;
1192 	struct hci_request req;
1193 	u8 page_scan;
1194 	int disconnect_counter;
1195 
1196 	if (next == hdev->suspend_state) {
1197 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1198 		goto done;
1199 	}
1200 
1201 	hdev->suspend_state = next;
1202 	hci_req_init(&req, hdev);
1203 
1204 	if (next == BT_SUSPEND_DISCONNECT) {
1205 		/* Mark device as suspended */
1206 		hdev->suspended = true;
1207 
1208 		/* Pause discovery if not already stopped */
1209 		old_state = hdev->discovery.state;
1210 		if (old_state != DISCOVERY_STOPPED) {
1211 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1212 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1213 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1214 		}
1215 
1216 		hdev->discovery_paused = true;
1217 		hdev->discovery_old_state = old_state;
1218 
1219 		/* Stop directed advertising */
1220 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1221 		if (old_state) {
1222 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1223 			cancel_delayed_work(&hdev->discov_off);
1224 			queue_delayed_work(hdev->req_workqueue,
1225 					   &hdev->discov_off, 0);
1226 		}
1227 
1228 		/* Pause other advertisements */
1229 		if (hdev->adv_instance_cnt)
1230 			hci_suspend_adv_instances(&req);
1231 
1232 		hdev->advertising_paused = true;
1233 		hdev->advertising_old_state = old_state;
1234 		/* Disable page scan */
1235 		page_scan = SCAN_DISABLED;
1236 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1237 
1238 		/* Disable LE passive scan if enabled */
1239 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1240 			hci_req_add_le_scan_disable(&req, false);
1241 
1242 		/* Mark task needing completion */
1243 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1244 
1245 		/* Prevent disconnects from causing scanning to be re-enabled */
1246 		hdev->scanning_paused = true;
1247 
1248 		/* Run commands before disconnecting */
1249 		hci_req_run(&req, suspend_req_complete);
1250 
1251 		disconnect_counter = 0;
1252 		/* Soft disconnect everything (power off) */
1253 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1254 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1255 			disconnect_counter++;
1256 		}
1257 
1258 		if (disconnect_counter > 0) {
1259 			bt_dev_dbg(hdev,
1260 				   "Had %d disconnects. Will wait on them",
1261 				   disconnect_counter);
1262 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1263 		}
1264 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1265 		/* Unpause to take care of updating scanning params */
1266 		hdev->scanning_paused = false;
1267 		/* Enable event filter for paired devices */
1268 		hci_req_set_event_filter(&req);
1269 		/* Enable passive scan at lower duty cycle */
1270 		hci_req_config_le_suspend_scan(&req);
1271 		/* Pause scan changes again. */
1272 		hdev->scanning_paused = true;
1273 		hci_req_run(&req, suspend_req_complete);
1274 	} else {
1275 		hdev->suspended = false;
1276 		hdev->scanning_paused = false;
1277 
1278 		hci_req_clear_event_filter(&req);
1279 		/* Reset passive/background scanning to normal */
1280 		hci_req_config_le_suspend_scan(&req);
1281 
1282 		/* Unpause directed advertising */
1283 		hdev->advertising_paused = false;
1284 		if (hdev->advertising_old_state) {
1285 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1286 				hdev->suspend_tasks);
1287 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1288 			queue_work(hdev->req_workqueue,
1289 				   &hdev->discoverable_update);
1290 			hdev->advertising_old_state = 0;
1291 		}
1292 
1293 		/* Resume other advertisements */
1294 		if (hdev->adv_instance_cnt)
1295 			hci_resume_adv_instances(&req);
1296 
1297 		/* Unpause discovery */
1298 		hdev->discovery_paused = false;
1299 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1300 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1301 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1302 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1303 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1304 		}
1305 
1306 		hci_req_run(&req, suspend_req_complete);
1307 	}
1308 
1309 	hdev->suspend_state = next;
1310 
1311 done:
1312 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1313 	wake_up(&hdev->suspend_wait_q);
1314 }
1315 
1316 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1317 {
1318 	u8 instance = hdev->cur_adv_instance;
1319 	struct adv_info *adv_instance;
1320 
1321 	/* Instance 0x00 always sets the local name */
1322 	if (instance == 0x00)
1323 		return 1;
1324 
1325 	adv_instance = hci_find_adv_instance(hdev, instance);
1326 	if (!adv_instance)
1327 		return 0;
1328 
1329 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1330 	 * These are currently being ignored as they are not supported.
1331 	 */
1332 	return adv_instance->scan_rsp_len;
1333 }
1334 
1335 void __hci_req_disable_advertising(struct hci_request *req)
1336 {
1337 	if (ext_adv_capable(req->hdev)) {
1338 		__hci_req_disable_ext_adv_instance(req, 0x00);
1339 
1340 	} else {
1341 		u8 enable = 0x00;
1342 
1343 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1344 	}
1345 }
1346 
1347 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1348 {
1349 	u32 flags;
1350 	struct adv_info *adv_instance;
1351 
1352 	if (instance == 0x00) {
1353 		/* Instance 0 always manages the "Tx Power" and "Flags"
1354 		 * fields
1355 		 */
1356 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1357 
1358 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1359 		 * corresponds to the "connectable" instance flag.
1360 		 */
1361 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1362 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1363 
1364 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1365 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1366 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1367 			flags |= MGMT_ADV_FLAG_DISCOV;
1368 
1369 		return flags;
1370 	}
1371 
1372 	adv_instance = hci_find_adv_instance(hdev, instance);
1373 
1374 	/* Return 0 when we got an invalid instance identifier. */
1375 	if (!adv_instance)
1376 		return 0;
1377 
1378 	return adv_instance->flags;
1379 }
1380 
1381 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1382 {
1383 	/* If privacy is not enabled don't use RPA */
1384 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1385 		return false;
1386 
1387 	/* If basic privacy mode is enabled use RPA */
1388 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1389 		return true;
1390 
1391 	/* If limited privacy mode is enabled don't use RPA if we're
1392 	 * both discoverable and bondable.
1393 	 */
1394 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1395 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1396 		return false;
1397 
1398 	/* We're neither bondable nor discoverable in the limited
1399 	 * privacy mode, therefore use RPA.
1400 	 */
1401 	return true;
1402 }
1403 
1404 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1405 {
1406 	/* If there is no connection we are OK to advertise. */
1407 	if (hci_conn_num(hdev, LE_LINK) == 0)
1408 		return true;
1409 
1410 	/* Check le_states if there is any connection in slave role. */
1411 	if (hdev->conn_hash.le_num_slave > 0) {
1412 		/* Slave connection state and non connectable mode bit 20. */
1413 		if (!connectable && !(hdev->le_states[2] & 0x10))
1414 			return false;
1415 
1416 		/* Slave connection state and connectable mode bit 38
1417 		 * and scannable bit 21.
1418 		 */
1419 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1420 				    !(hdev->le_states[2] & 0x20)))
1421 			return false;
1422 	}
1423 
1424 	/* Check le_states if there is any connection in master role. */
1425 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1426 		/* Master connection state and non connectable mode bit 18. */
1427 		if (!connectable && !(hdev->le_states[2] & 0x02))
1428 			return false;
1429 
1430 		/* Master connection state and connectable mode bit 35 and
1431 		 * scannable 19.
1432 		 */
1433 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1434 				    !(hdev->le_states[2] & 0x08)))
1435 			return false;
1436 	}
1437 
1438 	return true;
1439 }
1440 
1441 void __hci_req_enable_advertising(struct hci_request *req)
1442 {
1443 	struct hci_dev *hdev = req->hdev;
1444 	struct hci_cp_le_set_adv_param cp;
1445 	u8 own_addr_type, enable = 0x01;
1446 	bool connectable;
1447 	u16 adv_min_interval, adv_max_interval;
1448 	u32 flags;
1449 
1450 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1451 
1452 	/* If the "connectable" instance flag was not set, then choose between
1453 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1454 	 */
1455 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1456 		      mgmt_get_connectable(hdev);
1457 
1458 	if (!is_advertising_allowed(hdev, connectable))
1459 		return;
1460 
1461 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1462 		__hci_req_disable_advertising(req);
1463 
1464 	/* Clear the HCI_LE_ADV bit temporarily so that the
1465 	 * hci_update_random_address knows that it's safe to go ahead
1466 	 * and write a new random address. The flag will be set back on
1467 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1468 	 */
1469 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1470 
1471 	/* Set require_privacy to true only when non-connectable
1472 	 * advertising is used. In that case it is fine to use a
1473 	 * non-resolvable private address.
1474 	 */
1475 	if (hci_update_random_address(req, !connectable,
1476 				      adv_use_rpa(hdev, flags),
1477 				      &own_addr_type) < 0)
1478 		return;
1479 
1480 	memset(&cp, 0, sizeof(cp));
1481 
1482 	if (connectable) {
1483 		cp.type = LE_ADV_IND;
1484 
1485 		adv_min_interval = hdev->le_adv_min_interval;
1486 		adv_max_interval = hdev->le_adv_max_interval;
1487 	} else {
1488 		if (get_cur_adv_instance_scan_rsp_len(hdev))
1489 			cp.type = LE_ADV_SCAN_IND;
1490 		else
1491 			cp.type = LE_ADV_NONCONN_IND;
1492 
1493 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1494 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1495 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1496 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1497 		} else {
1498 			adv_min_interval = hdev->le_adv_min_interval;
1499 			adv_max_interval = hdev->le_adv_max_interval;
1500 		}
1501 	}
1502 
1503 	cp.min_interval = cpu_to_le16(adv_min_interval);
1504 	cp.max_interval = cpu_to_le16(adv_max_interval);
1505 	cp.own_address_type = own_addr_type;
1506 	cp.channel_map = hdev->le_adv_channel_map;
1507 
1508 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1509 
1510 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1511 }
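
/* Editorial note (not part of the original source): the advertising type
 * chosen above follows directly from the instance flags and global
 * settings: connectable advertising uses LE_ADV_IND, non-connectable
 * advertising with scan response data uses LE_ADV_SCAN_IND, and
 * otherwise LE_ADV_NONCONN_IND is used.
 */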
1512 
1513 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1514 {
1515 	size_t short_len;
1516 	size_t complete_len;
1517 
1518 	/* no space left for name (+ NULL + type + len) */
1519 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1520 		return ad_len;
1521 
1522 	/* use complete name if present and fits */
1523 	complete_len = strlen(hdev->dev_name);
1524 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1525 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1526 				       hdev->dev_name, complete_len + 1);
1527 
1528 	/* use short name if present */
1529 	short_len = strlen(hdev->short_name);
1530 	if (short_len)
1531 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1532 				       hdev->short_name, short_len + 1);
1533 
1534 	/* use shortened full name if present, we already know that name
1535 	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1536 	 */
1537 	if (complete_len) {
1538 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1539 
1540 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1541 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1542 
1543 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1544 				       sizeof(name));
1545 	}
1546 
1547 	return ad_len;
1548 }
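
/* Editorial note (not part of the original source): assuming
 * HCI_MAX_SHORT_NAME_LENGTH is 10 and no short name is configured, a
 * device name such as "kernel-devboard" does not fit as a complete
 * name, so the last branch above truncates it to "kernel-dev" and
 * advertises it as an EIR_NAME_SHORT field.
 */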
1549 
1550 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1551 {
1552 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1553 }
1554 
1555 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1556 {
1557 	u8 scan_rsp_len = 0;
1558 
1559 	if (hdev->appearance) {
1560 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1561 	}
1562 
1563 	return append_local_name(hdev, ptr, scan_rsp_len);
1564 }
1565 
1566 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1567 					u8 *ptr)
1568 {
1569 	struct adv_info *adv_instance;
1570 	u32 instance_flags;
1571 	u8 scan_rsp_len = 0;
1572 
1573 	adv_instance = hci_find_adv_instance(hdev, instance);
1574 	if (!adv_instance)
1575 		return 0;
1576 
1577 	instance_flags = adv_instance->flags;
1578 
1579 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1580 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1581 	}
1582 
1583 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1584 	       adv_instance->scan_rsp_len);
1585 
1586 	scan_rsp_len += adv_instance->scan_rsp_len;
1587 
1588 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1589 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1590 
1591 	return scan_rsp_len;
1592 }
1593 
1594 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1595 {
1596 	struct hci_dev *hdev = req->hdev;
1597 	u8 len;
1598 
1599 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1600 		return;
1601 
1602 	if (ext_adv_capable(hdev)) {
1603 		struct {
1604 			struct hci_cp_le_set_ext_scan_rsp_data cp;
1605 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1606 		} pdu;
1607 
1608 		memset(&pdu, 0, sizeof(pdu));
1609 
1610 		if (instance)
1611 			len = create_instance_scan_rsp_data(hdev, instance,
1612 							    pdu.data);
1613 		else
1614 			len = create_default_scan_rsp_data(hdev, pdu.data);
1615 
1616 		if (hdev->scan_rsp_data_len == len &&
1617 		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1618 			return;
1619 
1620 		memcpy(hdev->scan_rsp_data, pdu.data, len);
1621 		hdev->scan_rsp_data_len = len;
1622 
1623 		pdu.cp.handle = instance;
1624 		pdu.cp.length = len;
1625 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1626 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1627 
1628 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1629 			    sizeof(pdu.cp) + len, &pdu.cp);
1630 	} else {
1631 		struct hci_cp_le_set_scan_rsp_data cp;
1632 
1633 		memset(&cp, 0, sizeof(cp));
1634 
1635 		if (instance)
1636 			len = create_instance_scan_rsp_data(hdev, instance,
1637 							    cp.data);
1638 		else
1639 			len = create_default_scan_rsp_data(hdev, cp.data);
1640 
1641 		if (hdev->scan_rsp_data_len == len &&
1642 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1643 			return;
1644 
1645 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1646 		hdev->scan_rsp_data_len = len;
1647 
1648 		cp.length = len;
1649 
1650 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1651 	}
1652 }
1653 
1654 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1655 {
1656 	struct adv_info *adv_instance = NULL;
1657 	u8 ad_len = 0, flags = 0;
1658 	u32 instance_flags;
1659 
1660 	/* Return 0 when the current instance identifier is invalid. */
1661 	if (instance) {
1662 		adv_instance = hci_find_adv_instance(hdev, instance);
1663 		if (!adv_instance)
1664 			return 0;
1665 	}
1666 
1667 	instance_flags = get_adv_instance_flags(hdev, instance);
1668 
1669 	/* If instance already has the flags set skip adding it once
1670 	 * again.
1671 	 */
1672 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1673 					 adv_instance->adv_data_len, EIR_FLAGS,
1674 					 NULL))
1675 		goto skip_flags;
1676 
1677 	/* The Add Advertising command allows userspace to set both the general
1678 	 * and limited discoverable flags.
1679 	 */
1680 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1681 		flags |= LE_AD_GENERAL;
1682 
1683 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1684 		flags |= LE_AD_LIMITED;
1685 
1686 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1687 		flags |= LE_AD_NO_BREDR;
1688 
1689 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1690 		/* If a discovery flag wasn't provided, simply use the global
1691 		 * settings.
1692 		 */
1693 		if (!flags)
1694 			flags |= mgmt_get_adv_discov_flags(hdev);
1695 
1696 		/* If flags would still be empty, then there is no need to
1697 		 * include the "Flags" AD field.
1698 		 */
1699 		if (flags) {
1700 			ptr[0] = 0x02;
1701 			ptr[1] = EIR_FLAGS;
1702 			ptr[2] = flags;
1703 
1704 			ad_len += 3;
1705 			ptr += 3;
1706 		}
1707 	}
1708 
1709 skip_flags:
1710 	if (adv_instance) {
1711 		memcpy(ptr, adv_instance->adv_data,
1712 		       adv_instance->adv_data_len);
1713 		ad_len += adv_instance->adv_data_len;
1714 		ptr += adv_instance->adv_data_len;
1715 	}
1716 
1717 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1718 		s8 adv_tx_power;
1719 
1720 		if (ext_adv_capable(hdev)) {
1721 			if (adv_instance)
1722 				adv_tx_power = adv_instance->tx_power;
1723 			else
1724 				adv_tx_power = hdev->adv_tx_power;
1725 		} else {
1726 			adv_tx_power = hdev->adv_tx_power;
1727 		}
1728 
1729 		/* Provide Tx Power only if we can provide a valid value for it */
1730 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1731 			ptr[0] = 0x02;
1732 			ptr[1] = EIR_TX_POWER;
1733 			ptr[2] = (u8)adv_tx_power;
1734 
1735 			ad_len += 3;
1736 			ptr += 3;
1737 		}
1738 	}
1739 
1740 	return ad_len;
1741 }
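/* Illustrative sketch (not taken from this file) of the buffer built by
 * create_instance_adv_data(): with the general discoverable flag set on an
 * LE-only controller and a valid TX power, the output would look like
 *
 *   02 01 06            Flags: LE General Discoverable, BR/EDR not supported
 *   <instance adv_data> copied verbatim from the adv_info entry
 *   02 0a <tx_power>    TX Power Level
 *
 * with ad_len giving the total number of bytes written.
 */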
1742 
1743 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1744 {
1745 	struct hci_dev *hdev = req->hdev;
1746 	u8 len;
1747 
1748 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1749 		return;
1750 
1751 	if (ext_adv_capable(hdev)) {
1752 		struct {
1753 			struct hci_cp_le_set_ext_adv_data cp;
1754 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1755 		} pdu;
1756 
1757 		memset(&pdu, 0, sizeof(pdu));
1758 
1759 		len = create_instance_adv_data(hdev, instance, pdu.data);
1760 
1761 		/* There's nothing to do if the data hasn't changed */
1762 		if (hdev->adv_data_len == len &&
1763 		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1764 			return;
1765 
1766 		memcpy(hdev->adv_data, pdu.data, len);
1767 		hdev->adv_data_len = len;
1768 
1769 		pdu.cp.length = len;
1770 		pdu.cp.handle = instance;
1771 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1772 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1773 
1774 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1775 			    sizeof(pdu.cp) + len, &pdu.cp);
1776 	} else {
1777 		struct hci_cp_le_set_adv_data cp;
1778 
1779 		memset(&cp, 0, sizeof(cp));
1780 
1781 		len = create_instance_adv_data(hdev, instance, cp.data);
1782 
1783 		/* There's nothing to do if the data hasn't changed */
1784 		if (hdev->adv_data_len == len &&
1785 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1786 			return;
1787 
1788 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1789 		hdev->adv_data_len = len;
1790 
1791 		cp.length = len;
1792 
1793 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1794 	}
1795 }
1796 
1797 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1798 {
1799 	struct hci_request req;
1800 
1801 	hci_req_init(&req, hdev);
1802 	__hci_req_update_adv_data(&req, instance);
1803 
1804 	return hci_req_run(&req, NULL);
1805 }
1806 
1807 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1808 					    u16 opcode)
1809 {
1810 	BT_DBG("%s status %u", hdev->name, status);
1811 }
1812 
1813 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1814 {
1815 	struct hci_request req;
1816 	__u8 enable = 0x00;
1817 
1818 	if (!use_ll_privacy(hdev) &&
1819 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1820 		return;
1821 
1822 	hci_req_init(&req, hdev);
1823 
1824 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1825 
1826 	hci_req_run(&req, enable_addr_resolution_complete);
1827 }
1828 
1829 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1830 {
1831 	BT_DBG("%s status %u", hdev->name, status);
1832 }
1833 
1834 void hci_req_reenable_advertising(struct hci_dev *hdev)
1835 {
1836 	struct hci_request req;
1837 
1838 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1839 	    list_empty(&hdev->adv_instances))
1840 		return;
1841 
1842 	hci_req_init(&req, hdev);
1843 
1844 	if (hdev->cur_adv_instance) {
1845 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1846 						true);
1847 	} else {
1848 		if (ext_adv_capable(hdev)) {
1849 			__hci_req_start_ext_adv(&req, 0x00);
1850 		} else {
1851 			__hci_req_update_adv_data(&req, 0x00);
1852 			__hci_req_update_scan_rsp_data(&req, 0x00);
1853 			__hci_req_enable_advertising(&req);
1854 		}
1855 	}
1856 
1857 	hci_req_run(&req, adv_enable_complete);
1858 }
1859 
1860 static void adv_timeout_expire(struct work_struct *work)
1861 {
1862 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1863 					    adv_instance_expire.work);
1864 
1865 	struct hci_request req;
1866 	u8 instance;
1867 
1868 	BT_DBG("%s", hdev->name);
1869 
1870 	hci_dev_lock(hdev);
1871 
1872 	hdev->adv_instance_timeout = 0;
1873 
1874 	instance = hdev->cur_adv_instance;
1875 	if (instance == 0x00)
1876 		goto unlock;
1877 
1878 	hci_req_init(&req, hdev);
1879 
1880 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1881 
1882 	if (list_empty(&hdev->adv_instances))
1883 		__hci_req_disable_advertising(&req);
1884 
1885 	hci_req_run(&req, NULL);
1886 
1887 unlock:
1888 	hci_dev_unlock(hdev);
1889 }
1890 
1891 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1892 			   bool use_rpa, struct adv_info *adv_instance,
1893 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1894 {
1895 	int err;
1896 
1897 	bacpy(rand_addr, BDADDR_ANY);
1898 
1899 	/* If privacy is enabled, use a resolvable private address. If the
1900 	 * current RPA has expired, generate a new one.
1901 	 */
1902 	if (use_rpa) {
1903 		int to;
1904 
1905 		/* If the controller supports LL Privacy, use own address
1906 		 * type 0x03.
1907 		 */
1908 		if (use_ll_privacy(hdev))
1909 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1910 		else
1911 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1912 
1913 		if (adv_instance) {
1914 			if (!adv_instance->rpa_expired &&
1915 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1916 				return 0;
1917 
1918 			adv_instance->rpa_expired = false;
1919 		} else {
1920 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1921 			    !bacmp(&hdev->random_addr, &hdev->rpa))
1922 				return 0;
1923 		}
1924 
1925 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1926 		if (err < 0) {
1927 			bt_dev_err(hdev, "failed to generate new RPA");
1928 			return err;
1929 		}
1930 
1931 		bacpy(rand_addr, &hdev->rpa);
1932 
1933 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1934 		if (adv_instance)
1935 			queue_delayed_work(hdev->workqueue,
1936 					   &adv_instance->rpa_expired_cb, to);
1937 		else
1938 			queue_delayed_work(hdev->workqueue,
1939 					   &hdev->rpa_expired, to);
1940 
1941 		return 0;
1942 	}
1943 
1944 	/* In case of required privacy without resolvable private address,
1945 	 * use a non-resolvable private address. This is useful for
1946 	 * non-connectable advertising.
1947 	 */
1948 	if (require_privacy) {
1949 		bdaddr_t nrpa;
1950 
1951 		while (true) {
1952 			/* The non-resolvable private address is generated
1953 			 * from six random bytes with the two most significant
1954 			 * bits cleared.
1955 			 */
1956 			get_random_bytes(&nrpa, 6);
1957 			nrpa.b[5] &= 0x3f;
1958 
1959 			/* The non-resolvable private address shall not be
1960 			 * equal to the public address.
1961 			 */
1962 			if (bacmp(&hdev->bdaddr, &nrpa))
1963 				break;
1964 		}
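		/* Illustrative note: the two most significant bits of a
		 * random address select its sub-type, so clearing them with
		 * nrpa.b[5] &= 0x3f above yields sub-type 0b00, a
		 * non-resolvable private address (0b01 marks a resolvable
		 * private address and 0b11 a static random address).
		 */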
1965 
1966 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1967 		bacpy(rand_addr, &nrpa);
1968 
1969 		return 0;
1970 	}
1971 
1972 	/* No privacy so use a public address. */
1973 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1974 
1975 	return 0;
1976 }
1977 
1978 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1979 {
1980 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1981 }
1982 
1983 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1984 {
1985 	struct hci_cp_le_set_ext_adv_params cp;
1986 	struct hci_dev *hdev = req->hdev;
1987 	bool connectable;
1988 	u32 flags;
1989 	bdaddr_t random_addr;
1990 	u8 own_addr_type;
1991 	int err;
1992 	struct adv_info *adv_instance;
1993 	bool secondary_adv;
1994 
1995 	if (instance > 0) {
1996 		adv_instance = hci_find_adv_instance(hdev, instance);
1997 		if (!adv_instance)
1998 			return -EINVAL;
1999 	} else {
2000 		adv_instance = NULL;
2001 	}
2002 
2003 	flags = get_adv_instance_flags(hdev, instance);
2004 
2005 	/* If the "connectable" instance flag was not set, then choose between
2006 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2007 	 */
2008 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2009 		      mgmt_get_connectable(hdev);
2010 
2011 	if (!is_advertising_allowed(hdev, connectable))
2012 		return -EPERM;
2013 
2014 	/* Set require_privacy to true only when non-connectable
2015 	 * advertising is used. In that case it is fine to use a
2016 	 * non-resolvable private address.
2017 	 */
2018 	err = hci_get_random_address(hdev, !connectable,
2019 				     adv_use_rpa(hdev, flags), adv_instance,
2020 				     &own_addr_type, &random_addr);
2021 	if (err < 0)
2022 		return err;
2023 
2024 	memset(&cp, 0, sizeof(cp));
2025 
2026 	/* In the extended advertising parameters the interval is 3 octets */
2027 	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2028 	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
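	/* Example (assuming the common default of 0x0800, i.e. 1.28 s in
	 * 0.625 ms units): hci_cpu_to_le24(0x0800, cp.min_interval) stores
	 * the three octets 0x00 0x08 0x00 in little-endian order.
	 */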
2029 
2030 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2031 
2032 	if (connectable) {
2033 		if (secondary_adv)
2034 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2035 		else
2036 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2037 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2038 		if (secondary_adv)
2039 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2040 		else
2041 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2042 	} else {
2043 		if (secondary_adv)
2044 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2045 		else
2046 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2047 	}
2048 
2049 	cp.own_addr_type = own_addr_type;
2050 	cp.channel_map = hdev->le_adv_channel_map;
2051 	cp.tx_power = 127;
2052 	cp.handle = instance;
2053 
2054 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2055 		cp.primary_phy = HCI_ADV_PHY_1M;
2056 		cp.secondary_phy = HCI_ADV_PHY_2M;
2057 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2058 		cp.primary_phy = HCI_ADV_PHY_CODED;
2059 		cp.secondary_phy = HCI_ADV_PHY_CODED;
2060 	} else {
2061 		/* In all other cases use 1M */
2062 		cp.primary_phy = HCI_ADV_PHY_1M;
2063 		cp.secondary_phy = HCI_ADV_PHY_1M;
2064 	}
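	/* Note: the primary advertising channel only supports the 1M and
	 * Coded PHYs, which is why requesting 2M above still pairs a 1M
	 * primary PHY with a 2M secondary PHY.
	 */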
2065 
2066 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2067 
2068 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2069 	    bacmp(&random_addr, BDADDR_ANY)) {
2070 		struct hci_cp_le_set_adv_set_rand_addr cp;
2071 
2072 		/* Check if the random address needs to be updated */
2073 		if (adv_instance) {
2074 			if (!bacmp(&random_addr, &adv_instance->random_addr))
2075 				return 0;
2076 		} else {
2077 			if (!bacmp(&random_addr, &hdev->random_addr))
2078 				return 0;
2079 		}
2080 
2081 		memset(&cp, 0, sizeof(cp));
2082 
2083 		cp.handle = instance;
2084 		bacpy(&cp.bdaddr, &random_addr);
2085 
2086 		hci_req_add(req,
2087 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2088 			    sizeof(cp), &cp);
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2095 {
2096 	struct hci_dev *hdev = req->hdev;
2097 	struct hci_cp_le_set_ext_adv_enable *cp;
2098 	struct hci_cp_ext_adv_set *adv_set;
2099 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2100 	struct adv_info *adv_instance;
2101 
2102 	if (instance > 0) {
2103 		adv_instance = hci_find_adv_instance(hdev, instance);
2104 		if (!adv_instance)
2105 			return -EINVAL;
2106 	} else {
2107 		adv_instance = NULL;
2108 	}
2109 
2110 	cp = (void *) data;
2111 	adv_set = (void *) cp->data;
2112 
2113 	memset(cp, 0, sizeof(*cp));
2114 
2115 	cp->enable = 0x01;
2116 	cp->num_of_sets = 0x01;
2117 
2118 	memset(adv_set, 0, sizeof(*adv_set));
2119 
2120 	adv_set->handle = instance;
2121 
2122 	/* Set duration per instance since the controller is responsible for
2123 	 * scheduling it.
2124 	 */
2125 	if (adv_instance && adv_instance->timeout) {
2126 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2127 
2128 		/* Time = N * 10 ms */
2129 		adv_set->duration = cpu_to_le16(duration / 10);
2130 	}
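	/* Worked example (illustrative values): a timeout of 5 seconds gives
	 * duration = 5 * MSEC_PER_SEC = 5000, so the controller receives
	 * duration / 10 = 500, i.e. N = 500 units of 10 ms.
	 */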
2131 
2132 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2133 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2134 		    data);
2135 
2136 	return 0;
2137 }
2138 
2139 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2140 {
2141 	struct hci_dev *hdev = req->hdev;
2142 	struct hci_cp_le_set_ext_adv_enable *cp;
2143 	struct hci_cp_ext_adv_set *adv_set;
2144 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2145 	u8 req_size;
2146 
2147 	/* If request specifies an instance that doesn't exist, fail */
2148 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2149 		return -EINVAL;
2150 
2151 	memset(data, 0, sizeof(data));
2152 
2153 	cp = (void *)data;
2154 	adv_set = (void *)cp->data;
2155 
2156 	/* Instance 0x00 indicates all advertising instances will be disabled */
2157 	cp->num_of_sets = !!instance;
2158 	cp->enable = 0x00;
2159 
2160 	adv_set->handle = instance;
2161 
2162 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2163 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2164 
2165 	return 0;
2166 }
2167 
2168 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2169 {
2170 	struct hci_dev *hdev = req->hdev;
2171 
2172 	/* If request specifies an instance that doesn't exist, fail */
2173 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2174 		return -EINVAL;
2175 
2176 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2177 
2178 	return 0;
2179 }
2180 
2181 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2182 {
2183 	struct hci_dev *hdev = req->hdev;
2184 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2185 	int err;
2186 
2187 	/* If the instance isn't pending, the controller already knows about
2188 	 * it, and it's safe to disable it first.
2189 	 */
2190 	if (adv_instance && !adv_instance->pending)
2191 		__hci_req_disable_ext_adv_instance(req, instance);
2192 
2193 	err = __hci_req_setup_ext_adv_instance(req, instance);
2194 	if (err < 0)
2195 		return err;
2196 
2197 	__hci_req_update_scan_rsp_data(req, instance);
2198 	__hci_req_enable_ext_advertising(req, instance);
2199 
2200 	return 0;
2201 }
2202 
2203 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2204 				    bool force)
2205 {
2206 	struct hci_dev *hdev = req->hdev;
2207 	struct adv_info *adv_instance = NULL;
2208 	u16 timeout;
2209 
2210 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2211 	    list_empty(&hdev->adv_instances))
2212 		return -EPERM;
2213 
2214 	if (hdev->adv_instance_timeout)
2215 		return -EBUSY;
2216 
2217 	adv_instance = hci_find_adv_instance(hdev, instance);
2218 	if (!adv_instance)
2219 		return -ENOENT;
2220 
2221 	/* A zero timeout means unlimited advertising. As long as there is
2222 	 * only one instance, duration should be ignored. We still set a timeout
2223 	 * in case further instances are being added later on.
2224 	 *
2225 	 * If the remaining lifetime of the instance is more than the duration
2226 	 * then the timeout corresponds to the duration, otherwise it will be
2227 	 * reduced to the remaining instance lifetime.
2228 	 */
2229 	if (adv_instance->timeout == 0 ||
2230 	    adv_instance->duration <= adv_instance->remaining_time)
2231 		timeout = adv_instance->duration;
2232 	else
2233 		timeout = adv_instance->remaining_time;
2234 
2235 	/* The remaining time is being reduced unless the instance is being
2236 	 * advertised without time limit.
2237 	 */
2238 	if (adv_instance->timeout)
2239 		adv_instance->remaining_time =
2240 				adv_instance->remaining_time - timeout;
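	/* Worked example (illustrative values): an instance added with
	 * duration = 10 s and timeout = 25 s is scheduled with timeout = 10
	 * on the first two passes (remaining_time drops 25 -> 15 -> 5) and
	 * with timeout = 5 on the third, after which remaining_time is 0 and
	 * hci_req_clear_adv_instance() may remove the instance.
	 */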
2241 
2242 	/* Only use work for scheduling instances with legacy advertising */
2243 	if (!ext_adv_capable(hdev)) {
2244 		hdev->adv_instance_timeout = timeout;
2245 		queue_delayed_work(hdev->req_workqueue,
2246 			   &hdev->adv_instance_expire,
2247 			   msecs_to_jiffies(timeout * 1000));
2248 	}
2249 
2250 	/* If we're just re-scheduling the same instance again then do not
2251 	 * execute any HCI commands. This happens when a single instance is
2252 	 * being advertised.
2253 	 */
2254 	if (!force && hdev->cur_adv_instance == instance &&
2255 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2256 		return 0;
2257 
2258 	hdev->cur_adv_instance = instance;
2259 	if (ext_adv_capable(hdev)) {
2260 		__hci_req_start_ext_adv(req, instance);
2261 	} else {
2262 		__hci_req_update_adv_data(req, instance);
2263 		__hci_req_update_scan_rsp_data(req, instance);
2264 		__hci_req_enable_advertising(req);
2265 	}
2266 
2267 	return 0;
2268 }
2269 
2270 /* For a single instance:
2271  * - force == true: The instance will be removed even when its remaining
2272  *   lifetime is not zero.
2273  * - force == false: The instance will be deactivated but kept stored unless
2274  *   the remaining lifetime is zero.
2275  *
2276  * For instance == 0x00:
2277  * - force == true: All instances will be removed regardless of their timeout
2278  *   setting.
2279  * - force == false: Only instances that have a timeout will be removed.
2280  */
2281 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2282 				struct hci_request *req, u8 instance,
2283 				bool force)
2284 {
2285 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2286 	int err;
2287 	u8 rem_inst;
2288 
2289 	/* Cancel any timeout concerning the removed instance(s). */
2290 	if (!instance || hdev->cur_adv_instance == instance)
2291 		cancel_adv_timeout(hdev);
2292 
2293 	/* Get the next instance to advertise BEFORE we remove
2294 	 * the current one. This can be the same instance again
2295 	 * if there is only one instance.
2296 	 */
2297 	if (instance && hdev->cur_adv_instance == instance)
2298 		next_instance = hci_get_next_instance(hdev, instance);
2299 
2300 	if (instance == 0x00) {
2301 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2302 					 list) {
2303 			if (!(force || adv_instance->timeout))
2304 				continue;
2305 
2306 			rem_inst = adv_instance->instance;
2307 			err = hci_remove_adv_instance(hdev, rem_inst);
2308 			if (!err)
2309 				mgmt_advertising_removed(sk, hdev, rem_inst);
2310 		}
2311 	} else {
2312 		adv_instance = hci_find_adv_instance(hdev, instance);
2313 
2314 		if (force || (adv_instance && adv_instance->timeout &&
2315 			      !adv_instance->remaining_time)) {
2316 			/* Don't advertise a removed instance. */
2317 			if (next_instance &&
2318 			    next_instance->instance == instance)
2319 				next_instance = NULL;
2320 
2321 			err = hci_remove_adv_instance(hdev, instance);
2322 			if (!err)
2323 				mgmt_advertising_removed(sk, hdev, instance);
2324 		}
2325 	}
2326 
2327 	if (!req || !hdev_is_powered(hdev) ||
2328 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2329 		return;
2330 
2331 	if (next_instance && !ext_adv_capable(hdev))
2332 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2333 						false);
2334 }
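/* Usage sketch: adv_timeout_expire() above invokes this as
 *
 *   hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
 *
 * i.e. with no socket and force == false, so an expired instance is only
 * removed once its remaining lifetime has reached zero.
 */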
2335 
2336 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2337 {
2338 	struct hci_dev *hdev = req->hdev;
2339 
2340 	/* If we're advertising or initiating an LE connection we can't
2341 	 * go ahead and change the random address at this time. This is
2342 	 * because the eventual initiator address used for the
2343 	 * subsequently created connection will be undefined (some
2344 	 * controllers use the new address and others the one we had
2345 	 * when the operation started).
2346 	 *
2347 	 * In this kind of scenario skip the update and let the random
2348 	 * address be updated at the next cycle.
2349 	 */
2350 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2351 	    hci_lookup_le_connect(hdev)) {
2352 		BT_DBG("Deferring random address update");
2353 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2354 		return;
2355 	}
2356 
2357 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2358 }
2359 
2360 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2361 			      bool use_rpa, u8 *own_addr_type)
2362 {
2363 	struct hci_dev *hdev = req->hdev;
2364 	int err;
2365 
2366 	/* If privacy is enabled use a resolvable private address. If
2367 	 * current RPA has expired or there is something else than
2368 	 * the current RPA in use, then generate a new one.
2369 	 */
2370 	if (use_rpa) {
2371 		int to;
2372 
2373 		/* If the controller supports LL Privacy, use own address
2374 		 * type 0x03.
2375 		 */
2376 		if (use_ll_privacy(hdev))
2377 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2378 		else
2379 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2380 
2381 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2382 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2383 			return 0;
2384 
2385 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2386 		if (err < 0) {
2387 			bt_dev_err(hdev, "failed to generate new RPA");
2388 			return err;
2389 		}
2390 
2391 		set_random_addr(req, &hdev->rpa);
2392 
2393 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2394 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2395 
2396 		return 0;
2397 	}
2398 
2399 	/* In case of required privacy without resolvable private address,
2400 	 * use a non-resolvable private address. This is useful for active
2401 	 * scanning and non-connectable advertising.
2402 	 */
2403 	if (require_privacy) {
2404 		bdaddr_t nrpa;
2405 
2406 		while (true) {
2407 			/* The non-resolvable private address is generated
2408 			 * from six random bytes with the two most significant
2409 			 * bits cleared.
2410 			 */
2411 			get_random_bytes(&nrpa, 6);
2412 			nrpa.b[5] &= 0x3f;
2413 
2414 			/* The non-resolvable private address shall not be
2415 			 * equal to the public address.
2416 			 */
2417 			if (bacmp(&hdev->bdaddr, &nrpa))
2418 				break;
2419 		}
2420 
2421 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2422 		set_random_addr(req, &nrpa);
2423 		return 0;
2424 	}
2425 
2426 	/* If forcing static address is in use or there is no public
2427 	 * address, use the static address as the random address (but skip
2428 	 * the HCI command if the current random address is already the
2429 	 * static one).
2430 	 *
2431 	 * In case BR/EDR has been disabled on a dual-mode controller
2432 	 * and a static address has been configured, then use that
2433 	 * address instead of the public BR/EDR address.
2434 	 */
2435 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2436 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2437 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2438 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2439 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2440 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2441 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2442 				    &hdev->static_addr);
2443 		return 0;
2444 	}
2445 
2446 	/* Neither privacy nor static address is being used so use a
2447 	 * public address.
2448 	 */
2449 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2450 
2451 	return 0;
2452 }
2453 
2454 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2455 {
2456 	struct bdaddr_list *b;
2457 
2458 	list_for_each_entry(b, &hdev->whitelist, list) {
2459 		struct hci_conn *conn;
2460 
2461 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2462 		if (!conn)
2463 			return true;
2464 
2465 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2466 			return true;
2467 	}
2468 
2469 	return false;
2470 }
2471 
2472 void __hci_req_update_scan(struct hci_request *req)
2473 {
2474 	struct hci_dev *hdev = req->hdev;
2475 	u8 scan;
2476 
2477 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2478 		return;
2479 
2480 	if (!hdev_is_powered(hdev))
2481 		return;
2482 
2483 	if (mgmt_powering_down(hdev))
2484 		return;
2485 
2486 	if (hdev->scanning_paused)
2487 		return;
2488 
2489 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2490 	    disconnected_whitelist_entries(hdev))
2491 		scan = SCAN_PAGE;
2492 	else
2493 		scan = SCAN_DISABLED;
2494 
2495 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2496 		scan |= SCAN_INQUIRY;
2497 
2498 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2499 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2500 		return;
2501 
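	/* Illustrative values (assuming the standard Write Scan Enable
	 * encoding of SCAN_INQUIRY = 0x01 and SCAN_PAGE = 0x02): a device
	 * that is both connectable and discoverable writes 0x03, enabling
	 * page scan and inquiry scan together.
	 */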
2502 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2503 }
2504 
2505 static int update_scan(struct hci_request *req, unsigned long opt)
2506 {
2507 	hci_dev_lock(req->hdev);
2508 	__hci_req_update_scan(req);
2509 	hci_dev_unlock(req->hdev);
2510 	return 0;
2511 }
2512 
2513 static void scan_update_work(struct work_struct *work)
2514 {
2515 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2516 
2517 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2518 }
2519 
2520 static int connectable_update(struct hci_request *req, unsigned long opt)
2521 {
2522 	struct hci_dev *hdev = req->hdev;
2523 
2524 	hci_dev_lock(hdev);
2525 
2526 	__hci_req_update_scan(req);
2527 
2528 	/* If BR/EDR is not enabled and we disable advertising as a
2529 	 * by-product of disabling connectable, we need to update the
2530 	 * advertising flags.
2531 	 */
2532 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2533 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2534 
2535 	/* Update the advertising parameters if necessary */
2536 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2537 	    !list_empty(&hdev->adv_instances)) {
2538 		if (ext_adv_capable(hdev))
2539 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2540 		else
2541 			__hci_req_enable_advertising(req);
2542 	}
2543 
2544 	__hci_update_background_scan(req);
2545 
2546 	hci_dev_unlock(hdev);
2547 
2548 	return 0;
2549 }
2550 
2551 static void connectable_update_work(struct work_struct *work)
2552 {
2553 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2554 					    connectable_update);
2555 	u8 status;
2556 
2557 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2558 	mgmt_set_connectable_complete(hdev, status);
2559 }
2560 
2561 static u8 get_service_classes(struct hci_dev *hdev)
2562 {
2563 	struct bt_uuid *uuid;
2564 	u8 val = 0;
2565 
2566 	list_for_each_entry(uuid, &hdev->uuids, list)
2567 		val |= uuid->svc_hint;
2568 
2569 	return val;
2570 }
2571 
2572 void __hci_req_update_class(struct hci_request *req)
2573 {
2574 	struct hci_dev *hdev = req->hdev;
2575 	u8 cod[3];
2576 
2577 	BT_DBG("%s", hdev->name);
2578 
2579 	if (!hdev_is_powered(hdev))
2580 		return;
2581 
2582 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2583 		return;
2584 
2585 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2586 		return;
2587 
2588 	cod[0] = hdev->minor_class;
2589 	cod[1] = hdev->major_class;
2590 	cod[2] = get_service_classes(hdev);
2591 
2592 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2593 		cod[1] |= 0x20;
2594 
2595 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2596 		return;
2597 
2598 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2599 }
2600 
2601 static void write_iac(struct hci_request *req)
2602 {
2603 	struct hci_dev *hdev = req->hdev;
2604 	struct hci_cp_write_current_iac_lap cp;
2605 
2606 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2607 		return;
2608 
2609 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2610 		/* Limited discoverable mode */
2611 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2612 		cp.iac_lap[0] = 0x00;	/* LIAC */
2613 		cp.iac_lap[1] = 0x8b;
2614 		cp.iac_lap[2] = 0x9e;
2615 		cp.iac_lap[3] = 0x33;	/* GIAC */
2616 		cp.iac_lap[4] = 0x8b;
2617 		cp.iac_lap[5] = 0x9e;
2618 	} else {
2619 		/* General discoverable mode */
2620 		cp.num_iac = 1;
2621 		cp.iac_lap[0] = 0x33;	/* GIAC */
2622 		cp.iac_lap[1] = 0x8b;
2623 		cp.iac_lap[2] = 0x9e;
2624 	}
2625 
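	/* Note: each LAP above is stored in little-endian byte order, so the
	 * sequences correspond to the LIAC 0x9e8b00 and the GIAC 0x9e8b33.
	 * The command length is num_iac * 3 LAP octets plus one octet for
	 * num_iac itself.
	 */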
2626 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2627 		    (cp.num_iac * 3) + 1, &cp);
2628 }
2629 
2630 static int discoverable_update(struct hci_request *req, unsigned long opt)
2631 {
2632 	struct hci_dev *hdev = req->hdev;
2633 
2634 	hci_dev_lock(hdev);
2635 
2636 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2637 		write_iac(req);
2638 		__hci_req_update_scan(req);
2639 		__hci_req_update_class(req);
2640 	}
2641 
2642 	/* Advertising instances don't use the global discoverable setting, so
2643 	 * only update AD if advertising was enabled using Set Advertising.
2644 	 */
2645 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2646 		__hci_req_update_adv_data(req, 0x00);
2647 
2648 		/* Discoverable mode affects the local advertising
2649 		 * address in limited privacy mode.
2650 		 */
2651 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2652 			if (ext_adv_capable(hdev))
2653 				__hci_req_start_ext_adv(req, 0x00);
2654 			else
2655 				__hci_req_enable_advertising(req);
2656 		}
2657 	}
2658 
2659 	hci_dev_unlock(hdev);
2660 
2661 	return 0;
2662 }
2663 
2664 static void discoverable_update_work(struct work_struct *work)
2665 {
2666 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2667 					    discoverable_update);
2668 	u8 status;
2669 
2670 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2671 	mgmt_set_discoverable_complete(hdev, status);
2672 }
2673 
2674 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2675 		      u8 reason)
2676 {
2677 	switch (conn->state) {
2678 	case BT_CONNECTED:
2679 	case BT_CONFIG:
2680 		if (conn->type == AMP_LINK) {
2681 			struct hci_cp_disconn_phy_link cp;
2682 
2683 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2684 			cp.reason = reason;
2685 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2686 				    &cp);
2687 		} else {
2688 			struct hci_cp_disconnect dc;
2689 
2690 			dc.handle = cpu_to_le16(conn->handle);
2691 			dc.reason = reason;
2692 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2693 		}
2694 
2695 		conn->state = BT_DISCONN;
2696 
2697 		break;
2698 	case BT_CONNECT:
2699 		if (conn->type == LE_LINK) {
2700 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2701 				break;
2702 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2703 				    0, NULL);
2704 		} else if (conn->type == ACL_LINK) {
2705 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2706 				break;
2707 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2708 				    6, &conn->dst);
2709 		}
2710 		break;
2711 	case BT_CONNECT2:
2712 		if (conn->type == ACL_LINK) {
2713 			struct hci_cp_reject_conn_req rej;
2714 
2715 			bacpy(&rej.bdaddr, &conn->dst);
2716 			rej.reason = reason;
2717 
2718 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2719 				    sizeof(rej), &rej);
2720 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2721 			struct hci_cp_reject_sync_conn_req rej;
2722 
2723 			bacpy(&rej.bdaddr, &conn->dst);
2724 
2725 			/* SCO rejection has its own limited set of
2726 			 * allowed error values (0x0D-0x0F) which isn't
2727 			 * compatible with most values passed to this
2728 			 * function. To be safe, hard-code one of the
2729 			 * values that's suitable for SCO.
2730 			 */
2731 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2732 
2733 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2734 				    sizeof(rej), &rej);
2735 		}
2736 		break;
2737 	default:
2738 		conn->state = BT_CLOSED;
2739 		break;
2740 	}
2741 }
2742 
2743 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2744 {
2745 	if (status)
2746 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2747 }
2748 
2749 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2750 {
2751 	struct hci_request req;
2752 	int err;
2753 
2754 	hci_req_init(&req, conn->hdev);
2755 
2756 	__hci_abort_conn(&req, conn, reason);
2757 
2758 	err = hci_req_run(&req, abort_conn_complete);
2759 	if (err && err != -ENODATA) {
2760 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2761 		return err;
2762 	}
2763 
2764 	return 0;
2765 }
2766 
2767 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2768 {
2769 	hci_dev_lock(req->hdev);
2770 	__hci_update_background_scan(req);
2771 	hci_dev_unlock(req->hdev);
2772 	return 0;
2773 }
2774 
2775 static void bg_scan_update(struct work_struct *work)
2776 {
2777 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2778 					    bg_scan_update);
2779 	struct hci_conn *conn;
2780 	u8 status;
2781 	int err;
2782 
2783 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2784 	if (!err)
2785 		return;
2786 
2787 	hci_dev_lock(hdev);
2788 
2789 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2790 	if (conn)
2791 		hci_le_conn_failed(conn, status);
2792 
2793 	hci_dev_unlock(hdev);
2794 }
2795 
2796 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2797 {
2798 	hci_req_add_le_scan_disable(req, false);
2799 	return 0;
2800 }
2801 
2802 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2803 {
2804 	u8 length = opt;
2805 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2806 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2807 	struct hci_cp_inquiry cp;
2808 
2809 	BT_DBG("%s", req->hdev->name);
2810 
2811 	hci_dev_lock(req->hdev);
2812 	hci_inquiry_cache_flush(req->hdev);
2813 	hci_dev_unlock(req->hdev);
2814 
2815 	memset(&cp, 0, sizeof(cp));
2816 
2817 	if (req->hdev->discovery.limited)
2818 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2819 	else
2820 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2821 
2822 	cp.length = length;
2823 
2824 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2825 
2826 	return 0;
2827 }
2828 
2829 static void le_scan_disable_work(struct work_struct *work)
2830 {
2831 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2832 					    le_scan_disable.work);
2833 	u8 status;
2834 
2835 	BT_DBG("%s", hdev->name);
2836 
2837 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2838 		return;
2839 
2840 	cancel_delayed_work(&hdev->le_scan_restart);
2841 
2842 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2843 	if (status) {
2844 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2845 			   status);
2846 		return;
2847 	}
2848 
2849 	hdev->discovery.scan_start = 0;
2850 
2851 	/* If we were running LE only scan, change discovery state. If
2852 	 * we were running both LE and BR/EDR inquiry simultaneously,
2853 	 * and BR/EDR inquiry is already finished, stop discovery,
2854 	 * otherwise BR/EDR inquiry will stop discovery when finished.
2855 	 * If we are resolving a remote device name, do not change the
2856 	 * discovery state.
2857 	 */
2858 
2859 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2860 		goto discov_stopped;
2861 
2862 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2863 		return;
2864 
2865 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2866 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2867 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2868 			goto discov_stopped;
2869 
2870 		return;
2871 	}
2872 
2873 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2874 		     HCI_CMD_TIMEOUT, &status);
2875 	if (status) {
2876 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2877 		goto discov_stopped;
2878 	}
2879 
2880 	return;
2881 
2882 discov_stopped:
2883 	hci_dev_lock(hdev);
2884 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2885 	hci_dev_unlock(hdev);
2886 }
2887 
2888 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2889 {
2890 	struct hci_dev *hdev = req->hdev;
2891 
2892 	/* If controller is not scanning we are done. */
2893 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2894 		return 0;
2895 
2896 	if (hdev->scanning_paused) {
2897 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2898 		return 0;
2899 	}
2900 
2901 	hci_req_add_le_scan_disable(req, false);
2902 
2903 	if (use_ext_scan(hdev)) {
2904 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2905 
2906 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2907 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2908 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2909 
2910 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2911 			    sizeof(ext_enable_cp), &ext_enable_cp);
2912 	} else {
2913 		struct hci_cp_le_set_scan_enable cp;
2914 
2915 		memset(&cp, 0, sizeof(cp));
2916 		cp.enable = LE_SCAN_ENABLE;
2917 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2918 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2919 	}
2920 
2921 	return 0;
2922 }
2923 
2924 static void le_scan_restart_work(struct work_struct *work)
2925 {
2926 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2927 					    le_scan_restart.work);
2928 	unsigned long timeout, duration, scan_start, now;
2929 	u8 status;
2930 
2931 	BT_DBG("%s", hdev->name);
2932 
2933 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2934 	if (status) {
2935 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2936 			   status);
2937 		return;
2938 	}
2939 
2940 	hci_dev_lock(hdev);
2941 
2942 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2943 	    !hdev->discovery.scan_start)
2944 		goto unlock;
2945 
2946 	/* When the scan was started, hdev->le_scan_disable has been queued
2947 	 * after duration from scan_start. During scan restart this job
2948 	 * has been canceled, and we need to queue it again after the proper
2949 	 * timeout, to make sure that the scan does not run indefinitely.
2950 	 */
2951 	duration = hdev->discovery.scan_duration;
2952 	scan_start = hdev->discovery.scan_start;
2953 	now = jiffies;
2954 	if (now - scan_start <= duration) {
2955 		int elapsed;
2956 
2957 		if (now >= scan_start)
2958 			elapsed = now - scan_start;
2959 		else
2960 			elapsed = ULONG_MAX - scan_start + now;
2961 
2962 		timeout = duration - elapsed;
2963 	} else {
2964 		timeout = 0;
2965 	}
2966 
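	/* Worked example (illustrative): with a scan_duration equivalent to
	 * 10240 ms of which roughly 4000 ms have elapsed, the disable work
	 * below is re-queued with about 6240 ms left (all values kept in
	 * jiffies); once the full duration has passed, timeout is 0 and the
	 * scan is disabled on the next workqueue run.
	 */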
2967 	queue_delayed_work(hdev->req_workqueue,
2968 			   &hdev->le_scan_disable, timeout);
2969 
2970 unlock:
2971 	hci_dev_unlock(hdev);
2972 }
2973 
2974 static int active_scan(struct hci_request *req, unsigned long opt)
2975 {
2976 	uint16_t interval = opt;
2977 	struct hci_dev *hdev = req->hdev;
2978 	u8 own_addr_type;
2979 	/* White list is not used for discovery */
2980 	u8 filter_policy = 0x00;
2981 	/* Discovery doesn't require controller address resolution */
2982 	bool addr_resolv = false;
2983 	int err;
2984 
2985 	BT_DBG("%s", hdev->name);
2986 
2987 	/* If controller is scanning, it means the background scanning is
2988 	 * running. Thus, we should temporarily stop it in order to set the
2989 	 * discovery scanning parameters.
2990 	 */
2991 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2992 		hci_req_add_le_scan_disable(req, false);
2993 
2994 	/* All active scans will be done with either a resolvable private
2995 	 * address (when privacy feature has been enabled) or non-resolvable
2996 	 * private address.
2997 	 */
2998 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2999 					&own_addr_type);
3000 	if (err < 0)
3001 		own_addr_type = ADDR_LE_DEV_PUBLIC;
3002 
3003 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3004 			   hdev->le_scan_window_discovery, own_addr_type,
3005 			   filter_policy, addr_resolv);
3006 	return 0;
3007 }
3008 
3009 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3010 {
3011 	int err;
3012 
3013 	BT_DBG("%s", req->hdev->name);
3014 
3015 	err = active_scan(req, opt);
3016 	if (err)
3017 		return err;
3018 
3019 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3020 }
3021 
3022 static void start_discovery(struct hci_dev *hdev, u8 *status)
3023 {
3024 	unsigned long timeout;
3025 
3026 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3027 
3028 	switch (hdev->discovery.type) {
3029 	case DISCOV_TYPE_BREDR:
3030 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3031 			hci_req_sync(hdev, bredr_inquiry,
3032 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3033 				     status);
3034 		return;
3035 	case DISCOV_TYPE_INTERLEAVED:
3036 		/* When running simultaneous discovery, the LE scanning time
3037 		 * should occupy the whole discovery time since BR/EDR inquiry
3038 		 * and LE scanning are scheduled by the controller.
3039 		 *
3040 		 * For interleaving discovery in comparison, BR/EDR inquiry
3041 		 * and LE scanning are done sequentially with separate
3042 		 * timeouts.
3043 		 */
3044 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3045 			     &hdev->quirks)) {
3046 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3047 			/* During simultaneous discovery, we double LE scan
3048 			 * interval. We must leave some time for the controller
3049 			 * to do BR/EDR inquiry.
3050 			 */
3051 			hci_req_sync(hdev, interleaved_discov,
3052 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3053 				     status);
3054 			break;
3055 		}
3056 
3057 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3058 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3059 			     HCI_CMD_TIMEOUT, status);
3060 		break;
3061 	case DISCOV_TYPE_LE:
3062 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3063 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3064 			     HCI_CMD_TIMEOUT, status);
3065 		break;
3066 	default:
3067 		*status = HCI_ERROR_UNSPECIFIED;
3068 		return;
3069 	}
3070 
3071 	if (*status)
3072 		return;
3073 
3074 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3075 
3076 	/* When service discovery is used and the controller has a
3077 	 * strict duplicate filter, it is important to remember the
3078 	 * start and duration of the scan. This is required for
3079 	 * restarting scanning during the discovery phase.
3080 	 */
3081 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3082 		     hdev->discovery.result_filtering) {
3083 		hdev->discovery.scan_start = jiffies;
3084 		hdev->discovery.scan_duration = timeout;
3085 	}
3086 
3087 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3088 			   timeout);
3089 }
3090 
3091 bool hci_req_stop_discovery(struct hci_request *req)
3092 {
3093 	struct hci_dev *hdev = req->hdev;
3094 	struct discovery_state *d = &hdev->discovery;
3095 	struct hci_cp_remote_name_req_cancel cp;
3096 	struct inquiry_entry *e;
3097 	bool ret = false;
3098 
3099 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3100 
3101 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3102 		if (test_bit(HCI_INQUIRY, &hdev->flags))
3103 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3104 
3105 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3106 			cancel_delayed_work(&hdev->le_scan_disable);
3107 			hci_req_add_le_scan_disable(req, false);
3108 		}
3109 
3110 		ret = true;
3111 	} else {
3112 		/* Passive scanning */
3113 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3114 			hci_req_add_le_scan_disable(req, false);
3115 			ret = true;
3116 		}
3117 	}
3118 
3119 	/* No further actions needed for LE-only discovery */
3120 	if (d->type == DISCOV_TYPE_LE)
3121 		return ret;
3122 
3123 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3124 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3125 						     NAME_PENDING);
3126 		if (!e)
3127 			return ret;
3128 
3129 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3130 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3131 			    &cp);
3132 		ret = true;
3133 	}
3134 
3135 	return ret;
3136 }
3137 
3138 static int stop_discovery(struct hci_request *req, unsigned long opt)
3139 {
3140 	hci_dev_lock(req->hdev);
3141 	hci_req_stop_discovery(req);
3142 	hci_dev_unlock(req->hdev);
3143 
3144 	return 0;
3145 }
3146 
3147 static void discov_update(struct work_struct *work)
3148 {
3149 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3150 					    discov_update);
3151 	u8 status = 0;
3152 
3153 	switch (hdev->discovery.state) {
3154 	case DISCOVERY_STARTING:
3155 		start_discovery(hdev, &status);
3156 		mgmt_start_discovery_complete(hdev, status);
3157 		if (status)
3158 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3159 		else
3160 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3161 		break;
3162 	case DISCOVERY_STOPPING:
3163 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3164 		mgmt_stop_discovery_complete(hdev, status);
3165 		if (!status)
3166 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3167 		break;
3168 	case DISCOVERY_STOPPED:
3169 	default:
3170 		return;
3171 	}
3172 }
3173 
3174 static void discov_off(struct work_struct *work)
3175 {
3176 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3177 					    discov_off.work);
3178 
3179 	BT_DBG("%s", hdev->name);
3180 
3181 	hci_dev_lock(hdev);
3182 
3183 	/* When discoverable timeout triggers, then just make sure
3184 	 * the limited discoverable flag is cleared. Even in the case
3185 	 * of a timeout triggered from general discoverable, it is
3186 	 * safe to unconditionally clear the flag.
3187 	 */
3188 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3189 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3190 	hdev->discov_timeout = 0;
3191 
3192 	hci_dev_unlock(hdev);
3193 
3194 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3195 	mgmt_new_settings(hdev);
3196 }
3197 
3198 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3199 {
3200 	struct hci_dev *hdev = req->hdev;
3201 	u8 link_sec;
3202 
3203 	hci_dev_lock(hdev);
3204 
3205 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3206 	    !lmp_host_ssp_capable(hdev)) {
3207 		u8 mode = 0x01;
3208 
3209 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3210 
3211 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3212 			u8 support = 0x01;
3213 
3214 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3215 				    sizeof(support), &support);
3216 		}
3217 	}
3218 
3219 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3220 	    lmp_bredr_capable(hdev)) {
3221 		struct hci_cp_write_le_host_supported cp;
3222 
3223 		cp.le = 0x01;
3224 		cp.simul = 0x00;
3225 
3226 		/* Check first if we already have the right
3227 		 * host state (host features set)
3228 		 */
3229 		if (cp.le != lmp_host_le_capable(hdev) ||
3230 		    cp.simul != lmp_host_le_br_capable(hdev))
3231 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3232 				    sizeof(cp), &cp);
3233 	}
3234 
3235 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3236 		/* Make sure the controller has a good default for
3237 		 * advertising data. This also applies to the case
3238 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3239 		 */
3240 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3241 		    list_empty(&hdev->adv_instances)) {
3242 			int err;
3243 
3244 			if (ext_adv_capable(hdev)) {
3245 				err = __hci_req_setup_ext_adv_instance(req,
3246 								       0x00);
3247 				if (!err)
3248 					__hci_req_update_scan_rsp_data(req,
3249 								       0x00);
3250 			} else {
3251 				err = 0;
3252 				__hci_req_update_adv_data(req, 0x00);
3253 				__hci_req_update_scan_rsp_data(req, 0x00);
3254 			}
3255 
3256 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3257 				if (!ext_adv_capable(hdev))
3258 					__hci_req_enable_advertising(req);
3259 				else if (!err)
3260 					__hci_req_enable_ext_advertising(req,
3261 									 0x00);
3262 			}
3263 		} else if (!list_empty(&hdev->adv_instances)) {
3264 			struct adv_info *adv_instance;
3265 
3266 			adv_instance = list_first_entry(&hdev->adv_instances,
3267 							struct adv_info, list);
3268 			__hci_req_schedule_adv_instance(req,
3269 							adv_instance->instance,
3270 							true);
3271 		}
3272 	}
3273 
3274 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3275 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3276 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3277 			    sizeof(link_sec), &link_sec);
3278 
3279 	if (lmp_bredr_capable(hdev)) {
3280 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3281 			__hci_req_write_fast_connectable(req, true);
3282 		else
3283 			__hci_req_write_fast_connectable(req, false);
3284 		__hci_req_update_scan(req);
3285 		__hci_req_update_class(req);
3286 		__hci_req_update_name(req);
3287 		__hci_req_update_eir(req);
3288 	}
3289 
3290 	hci_dev_unlock(hdev);
3291 	return 0;
3292 }
3293 
3294 int __hci_req_hci_power_on(struct hci_dev *hdev)
3295 {
3296 	/* Register the available SMP channels (BR/EDR and LE) only when
3297 	 * successfully powering on the controller. This late
3298 	 * registration is required so that LE SMP can clearly decide if
3299 	 * the public address or static address is used.
3300 	 */
3301 	smp_register(hdev);
3302 
3303 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3304 			      NULL);
3305 }
3306 
3307 void hci_request_setup(struct hci_dev *hdev)
3308 {
3309 	INIT_WORK(&hdev->discov_update, discov_update);
3310 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3311 	INIT_WORK(&hdev->scan_update, scan_update_work);
3312 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3313 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3314 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3315 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3316 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3317 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3318 }
3319 
3320 void hci_request_cancel_all(struct hci_dev *hdev)
3321 {
3322 	hci_req_sync_cancel(hdev, ENODEV);
3323 
3324 	cancel_work_sync(&hdev->discov_update);
3325 	cancel_work_sync(&hdev->bg_scan_update);
3326 	cancel_work_sync(&hdev->scan_update);
3327 	cancel_work_sync(&hdev->connectable_update);
3328 	cancel_work_sync(&hdev->discoverable_update);
3329 	cancel_delayed_work_sync(&hdev->discov_off);
3330 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3331 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3332 
3333 	if (hdev->adv_instance_timeout) {
3334 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3335 		hdev->adv_instance_timeout = 0;
3336 	}
3337 }
3338