1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
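
/* Note (added for clarity): these states are tracked in hdev->req_status for
 * synchronous requests.  hci_req_sync_complete() moves PEND to DONE when the
 * final command of a request completes, hci_req_sync_cancel() moves PEND to
 * CANCELED, and both wake up the waiters on hdev->req_wait_q.
 */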
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
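
/* Illustrative sketch (not part of the original file): a typical caller
 * builds a request on the stack, queues one or more commands and runs it
 * with an optional completion callback.  HCI_OP_WRITE_SCAN_ENABLE is just
 * an example of a command with a single one-byte parameter.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x",
 *			   opcode, status);
 *	}
 *
 *	static int example_write_scan(struct hci_dev *hdev, u8 scan)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, example_complete);
 *	}
 */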
101
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb) {
111 kfree_skb(hdev->req_skb);
112 hdev->req_skb = skb_get(skb);
113 }
114 wake_up_interruptible(&hdev->req_wait_q);
115 }
116 }
117
118 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
119 {
120 BT_DBG("%s err 0x%2.2x", hdev->name, err);
121
122 if (hdev->req_status == HCI_REQ_PEND) {
123 hdev->req_result = err;
124 hdev->req_status = HCI_REQ_CANCELED;
125 wake_up_interruptible(&hdev->req_wait_q);
126 }
127 }
128
129 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
130 const void *param, u8 event, u32 timeout)
131 {
132 struct hci_request req;
133 struct sk_buff *skb;
134 int err = 0;
135
136 BT_DBG("%s", hdev->name);
137
138 hci_req_init(&req, hdev);
139
140 hci_req_add_ev(&req, opcode, plen, param, event);
141
142 hdev->req_status = HCI_REQ_PEND;
143
144 err = hci_req_run_skb(&req, hci_req_sync_complete);
145 if (err < 0)
146 return ERR_PTR(err);
147
148 err = wait_event_interruptible_timeout(hdev->req_wait_q,
149 hdev->req_status != HCI_REQ_PEND, timeout);
150
151 if (err == -ERESTARTSYS)
152 return ERR_PTR(-EINTR);
153
154 switch (hdev->req_status) {
155 case HCI_REQ_DONE:
156 err = -bt_to_errno(hdev->req_result);
157 break;
158
159 case HCI_REQ_CANCELED:
160 err = -hdev->req_result;
161 break;
162
163 default:
164 err = -ETIMEDOUT;
165 break;
166 }
167
168 hdev->req_status = hdev->req_result = 0;
169 skb = hdev->req_skb;
170 hdev->req_skb = NULL;
171
172 BT_DBG("%s end: err %d", hdev->name, err);
173
174 if (err < 0) {
175 kfree_skb(skb);
176 return ERR_PTR(err);
177 }
178
179 if (!skb)
180 return ERR_PTR(-ENODATA);
181
182 return skb;
183 }
184 EXPORT_SYMBOL(__hci_cmd_sync_ev);
185
186 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
187 const void *param, u32 timeout)
188 {
189 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
190 }
191 EXPORT_SYMBOL(__hci_cmd_sync);
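
/* Illustrative sketch (not part of the original file): issuing a single
 * command synchronously and consuming its Command Complete parameters,
 * assuming the caller already serialises access (e.g. via
 * hci_req_sync_lock()).  HCI_OP_READ_LOCAL_VERSION and HCI_CMD_TIMEOUT are
 * the standard definitions from <net/bluetooth/hci.h>.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data (the command's return parameters) ...
 *	kfree_skb(skb);
 */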
192
193 /* Execute request and wait for completion. */
194 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
195 unsigned long opt),
196 unsigned long opt, u32 timeout, u8 *hci_status)
197 {
198 struct hci_request req;
199 int err = 0;
200
201 BT_DBG("%s start", hdev->name);
202
203 hci_req_init(&req, hdev);
204
205 hdev->req_status = HCI_REQ_PEND;
206
207 err = func(&req, opt);
208 if (err) {
209 if (hci_status)
210 *hci_status = HCI_ERROR_UNSPECIFIED;
211 return err;
212 }
213
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 if (err < 0) {
216 hdev->req_status = 0;
217
218 /* ENODATA means the HCI request command queue is empty.
219 * This can happen when a request with conditionals doesn't
220 * trigger any commands to be sent. This is normal behavior
221 * and should not trigger an error return.
222 */
223 if (err == -ENODATA) {
224 if (hci_status)
225 *hci_status = 0;
226 return 0;
227 }
228
229 if (hci_status)
230 *hci_status = HCI_ERROR_UNSPECIFIED;
231
232 return err;
233 }
234
235 err = wait_event_interruptible_timeout(hdev->req_wait_q,
236 hdev->req_status != HCI_REQ_PEND, timeout);
237
238 if (err == -ERESTARTSYS)
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
243 err = -bt_to_errno(hdev->req_result);
244 if (hci_status)
245 *hci_status = hdev->req_result;
246 break;
247
248 case HCI_REQ_CANCELED:
249 err = -hdev->req_result;
250 if (hci_status)
251 *hci_status = HCI_ERROR_UNSPECIFIED;
252 break;
253
254 default:
255 err = -ETIMEDOUT;
256 if (hci_status)
257 *hci_status = HCI_ERROR_UNSPECIFIED;
258 break;
259 }
260
261 kfree_skb(hdev->req_skb);
262 hdev->req_skb = NULL;
263 hdev->req_status = hdev->req_result = 0;
264
265 BT_DBG("%s end: err %d", hdev->name, err);
266
267 return err;
268 }
269
270 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
271 unsigned long opt),
272 unsigned long opt, u32 timeout, u8 *hci_status)
273 {
274 int ret;
275
276 /* Serialize all requests */
277 hci_req_sync_lock(hdev);
278 /* Check the state after obtaining the lock to protect HCI_UP
279 * against races with hci_dev_do_close() when the controller
280 * gets removed.
281 */
282 if (test_bit(HCI_UP, &hdev->flags))
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 else
285 ret = -ENETDOWN;
286 hci_req_sync_unlock(hdev);
287
288 return ret;
289 }
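
/* Illustrative sketch (not part of the original file): hci_req_sync() takes
 * a request-builder callback, runs the resulting request and waits for its
 * completion.  A hypothetical caller could look like this; the opcode and
 * parameter struct are the standard hci.h definitions.
 *
 *	static int write_le_host_supported(struct hci_request *req,
 *					   unsigned long opt)
 *	{
 *		struct hci_cp_write_le_host_supported cp = { .le = 0x01 };
 *
 *		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
 *			    sizeof(cp), &cp);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, write_le_host_supported, 0,
 *			       HCI_CMD_TIMEOUT, &status);
 */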
290
291 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 const void *param)
293 {
294 int len = HCI_COMMAND_HDR_SIZE + plen;
295 struct hci_command_hdr *hdr;
296 struct sk_buff *skb;
297
298 skb = bt_skb_alloc(len, GFP_ATOMIC);
299 if (!skb)
300 return NULL;
301
302 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
303 hdr->opcode = cpu_to_le16(opcode);
304 hdr->plen = plen;
305
306 if (plen)
307 skb_put_data(skb, param, plen);
308
309 BT_DBG("skb len %d", skb->len);
310
311 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
312 hci_skb_opcode(skb) = opcode;
313
314 return skb;
315 }
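
/* Note (added for clarity): the skb built above carries the raw HCI command
 * packet: a 2-byte little-endian opcode, a 1-byte parameter length and then
 * the parameters.  For example, HCI_OP_WRITE_SCAN_ENABLE (0x0c1a) with the
 * single parameter 0x02 yields the payload 1a 0c 01 02; the H:4 packet type
 * indicator (HCI_COMMAND_PKT) is kept separately in hci_skb_pkt_type().
 */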
316
317 /* Queue a command to an asynchronous HCI request */
318 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
319 const void *param, u8 event)
320 {
321 struct hci_dev *hdev = req->hdev;
322 struct sk_buff *skb;
323
324 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
325
326 /* If an error occurred during request building, there is no point in
327 * queueing the HCI command. We can simply return.
328 */
329 if (req->err)
330 return;
331
332 skb = hci_prepare_cmd(hdev, opcode, plen, param);
333 if (!skb) {
334 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
335 opcode);
336 req->err = -ENOMEM;
337 return;
338 }
339
340 if (skb_queue_empty(&req->cmd_q))
341 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
342
343 bt_cb(skb)->hci.req_event = event;
344
345 skb_queue_tail(&req->cmd_q, skb);
346 }
347
348 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 const void *param)
350 {
351 hci_req_add_ev(req, opcode, plen, param, 0);
352 }
353
354 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
355 {
356 struct hci_dev *hdev = req->hdev;
357 struct hci_cp_write_page_scan_activity acp;
358 u8 type;
359
360 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 return;
362
363 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
364 return;
365
366 if (enable) {
367 type = PAGE_SCAN_TYPE_INTERLACED;
368
369 /* 160 msec page scan interval */
370 acp.interval = cpu_to_le16(0x0100);
371 } else {
372 type = hdev->def_page_scan_type;
373 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
374 }
375
376 acp.window = cpu_to_le16(hdev->def_page_scan_window);
377
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 sizeof(acp), &acp);
382
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385 }
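
/* Note (added for clarity): page scan interval and window are expressed in
 * units of 0.625 ms, so the 0x0100 (256) used above corresponds to
 * 256 * 0.625 ms = 160 ms, matching the "160 msec" comment.
 */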
386
387 static void start_interleave_scan(struct hci_dev *hdev)
388 {
389 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
390 queue_delayed_work(hdev->req_workqueue,
391 &hdev->interleave_scan, 0);
392 }
393
394 static bool is_interleave_scanning(struct hci_dev *hdev)
395 {
396 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
397 }
398
399 static void cancel_interleave_scan(struct hci_dev *hdev)
400 {
401 bt_dev_dbg(hdev, "cancelling interleave scan");
402
403 cancel_delayed_work_sync(&hdev->interleave_scan);
404
405 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
406 }
407
408 /* Return true if this function starts an interleave scan, otherwise
409 * return false.
410 */
411 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
412 {
413 /* If there is at least one ADV monitor and at least one pending LE
414 * connection or device to be scanned for, we should alternate between
415 * allowlist scanning and unfiltered scanning to save power.
416 */
417 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
418 !(list_empty(&hdev->pend_le_conns) &&
419 list_empty(&hdev->pend_le_reports));
420 bool is_interleaving = is_interleave_scanning(hdev);
421
422 if (use_interleaving && !is_interleaving) {
423 start_interleave_scan(hdev);
424 bt_dev_dbg(hdev, "starting interleave scan");
425 return true;
426 }
427
428 if (!use_interleaving && is_interleaving)
429 cancel_interleave_scan(hdev);
430
431 return false;
432 }
433
434 /* This function controls the background scanning based on hdev->pend_le_conns
435 * list. If there are pending LE connections we start the background scanning,
436 * otherwise we stop it.
437 *
438 * This function requires the caller holds hdev->lock.
439 */
440 static void __hci_update_background_scan(struct hci_request *req)
441 {
442 struct hci_dev *hdev = req->hdev;
443
444 if (!test_bit(HCI_UP, &hdev->flags) ||
445 test_bit(HCI_INIT, &hdev->flags) ||
446 hci_dev_test_flag(hdev, HCI_SETUP) ||
447 hci_dev_test_flag(hdev, HCI_CONFIG) ||
448 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
449 hci_dev_test_flag(hdev, HCI_UNREGISTER))
450 return;
451
452 /* No point in doing scanning if LE support hasn't been enabled */
453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
454 return;
455
456 /* If discovery is active don't interfere with it */
457 if (hdev->discovery.state != DISCOVERY_STOPPED)
458 return;
459
460 /* Reset RSSI and UUID filters when starting background scanning
461 * since these filters are meant for service discovery only.
462 *
463 * The Start Discovery and Start Service Discovery operations
464 * ensure to set proper values for RSSI threshold and UUID
465 * filter list. So it is safe to just reset them here.
466 */
467 hci_discovery_filter_clear(hdev);
468
469 BT_DBG("%s ADV monitoring is %s", hdev->name,
470 hci_is_adv_monitoring(hdev) ? "on" : "off");
471
472 if (list_empty(&hdev->pend_le_conns) &&
473 list_empty(&hdev->pend_le_reports) &&
474 !hci_is_adv_monitoring(hdev)) {
475 /* If there are no pending LE connections, no devices
476 * to be scanned for and no ADV monitors, we should stop the
477 * background scanning.
478 */
479
480 /* If controller is not scanning we are done. */
481 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
482 return;
483
484 hci_req_add_le_scan_disable(req, false);
485
486 BT_DBG("%s stopping background scanning", hdev->name);
487 } else {
488 /* If there is at least one pending LE connection, we should
489 * keep the background scan running.
490 */
491
492 /* If controller is connecting, we should not start scanning
493 * since some controllers are not able to scan and connect at
494 * the same time.
495 */
496 if (hci_lookup_le_connect(hdev))
497 return;
498
499 /* If controller is currently scanning, we stop it to ensure we
500 * don't miss any advertising (due to duplicates filter).
501 */
502 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
503 hci_req_add_le_scan_disable(req, false);
504
505 hci_req_add_le_passive_scan(req);
506 bt_dev_dbg(hdev, "starting background scanning");
507 }
508 }
509
510 void __hci_req_update_name(struct hci_request *req)
511 {
512 struct hci_dev *hdev = req->hdev;
513 struct hci_cp_write_local_name cp;
514
515 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
516
517 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
518 }
519
520 #define PNP_INFO_SVCLASS_ID 0x1200
521
522 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
523 {
524 u8 *ptr = data, *uuids_start = NULL;
525 struct bt_uuid *uuid;
526
527 if (len < 4)
528 return ptr;
529
530 list_for_each_entry(uuid, &hdev->uuids, list) {
531 u16 uuid16;
532
533 if (uuid->size != 16)
534 continue;
535
536 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
537 if (uuid16 < 0x1100)
538 continue;
539
540 if (uuid16 == PNP_INFO_SVCLASS_ID)
541 continue;
542
543 if (!uuids_start) {
544 uuids_start = ptr;
545 uuids_start[0] = 1;
546 uuids_start[1] = EIR_UUID16_ALL;
547 ptr += 2;
548 }
549
550 /* Stop if not enough space to put next UUID */
551 if ((ptr - data) + sizeof(u16) > len) {
552 uuids_start[1] = EIR_UUID16_SOME;
553 break;
554 }
555
556 *ptr++ = (uuid16 & 0x00ff);
557 *ptr++ = (uuid16 & 0xff00) >> 8;
558 uuids_start[0] += sizeof(uuid16);
559 }
560
561 return ptr;
562 }
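
/* Note (added for clarity): the block emitted above follows the usual EIR/AD
 * "length, type, data" layout.  A single 16-bit Serial Port Profile UUID
 * (0x1101), for example, would be encoded as the four bytes 03 03 01 11:
 * length 0x03, type EIR_UUID16_ALL (0x03) and the UUID in little-endian
 * byte order.
 */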
563
564 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
565 {
566 u8 *ptr = data, *uuids_start = NULL;
567 struct bt_uuid *uuid;
568
569 if (len < 6)
570 return ptr;
571
572 list_for_each_entry(uuid, &hdev->uuids, list) {
573 if (uuid->size != 32)
574 continue;
575
576 if (!uuids_start) {
577 uuids_start = ptr;
578 uuids_start[0] = 1;
579 uuids_start[1] = EIR_UUID32_ALL;
580 ptr += 2;
581 }
582
583 /* Stop if not enough space to put next UUID */
584 if ((ptr - data) + sizeof(u32) > len) {
585 uuids_start[1] = EIR_UUID32_SOME;
586 break;
587 }
588
589 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
590 ptr += sizeof(u32);
591 uuids_start[0] += sizeof(u32);
592 }
593
594 return ptr;
595 }
596
597 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
598 {
599 u8 *ptr = data, *uuids_start = NULL;
600 struct bt_uuid *uuid;
601
602 if (len < 18)
603 return ptr;
604
605 list_for_each_entry(uuid, &hdev->uuids, list) {
606 if (uuid->size != 128)
607 continue;
608
609 if (!uuids_start) {
610 uuids_start = ptr;
611 uuids_start[0] = 1;
612 uuids_start[1] = EIR_UUID128_ALL;
613 ptr += 2;
614 }
615
616 /* Stop if not enough space to put next UUID */
617 if ((ptr - data) + 16 > len) {
618 uuids_start[1] = EIR_UUID128_SOME;
619 break;
620 }
621
622 memcpy(ptr, uuid->uuid, 16);
623 ptr += 16;
624 uuids_start[0] += 16;
625 }
626
627 return ptr;
628 }
629
630 static void create_eir(struct hci_dev *hdev, u8 *data)
631 {
632 u8 *ptr = data;
633 size_t name_len;
634
635 name_len = strlen(hdev->dev_name);
636
637 if (name_len > 0) {
638 /* EIR Data type */
639 if (name_len > 48) {
640 name_len = 48;
641 ptr[1] = EIR_NAME_SHORT;
642 } else
643 ptr[1] = EIR_NAME_COMPLETE;
644
645 /* EIR Data length */
646 ptr[0] = name_len + 1;
647
648 memcpy(ptr + 2, hdev->dev_name, name_len);
649
650 ptr += (name_len + 2);
651 }
652
653 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
654 ptr[0] = 2;
655 ptr[1] = EIR_TX_POWER;
656 ptr[2] = (u8) hdev->inq_tx_power;
657
658 ptr += 3;
659 }
660
661 if (hdev->devid_source > 0) {
662 ptr[0] = 9;
663 ptr[1] = EIR_DEVICE_ID;
664
665 put_unaligned_le16(hdev->devid_source, ptr + 2);
666 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
667 put_unaligned_le16(hdev->devid_product, ptr + 6);
668 put_unaligned_le16(hdev->devid_version, ptr + 8);
669
670 ptr += 10;
671 }
672
673 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
674 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
675 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
676 }
677
678 void __hci_req_update_eir(struct hci_request *req)
679 {
680 struct hci_dev *hdev = req->hdev;
681 struct hci_cp_write_eir cp;
682
683 if (!hdev_is_powered(hdev))
684 return;
685
686 if (!lmp_ext_inq_capable(hdev))
687 return;
688
689 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
690 return;
691
692 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
693 return;
694
695 memset(&cp, 0, sizeof(cp));
696
697 create_eir(hdev, cp.data);
698
699 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
700 return;
701
702 memcpy(hdev->eir, cp.data, sizeof(cp.data));
703
704 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
705 }
706
707 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
708 {
709 struct hci_dev *hdev = req->hdev;
710
711 if (hdev->scanning_paused) {
712 bt_dev_dbg(hdev, "Scanning is paused for suspend");
713 return;
714 }
715
716 if (use_ext_scan(hdev)) {
717 struct hci_cp_le_set_ext_scan_enable cp;
718
719 memset(&cp, 0, sizeof(cp));
720 cp.enable = LE_SCAN_DISABLE;
721 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
722 &cp);
723 } else {
724 struct hci_cp_le_set_scan_enable cp;
725
726 memset(&cp, 0, sizeof(cp));
727 cp.enable = LE_SCAN_DISABLE;
728 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
729 }
730
731 /* Disable address resolution */
732 if (use_ll_privacy(hdev) &&
733 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
734 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
735 __u8 enable = 0x00;
736
737 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
738 }
739 }
740
741 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
742 u8 bdaddr_type)
743 {
744 struct hci_cp_le_del_from_accept_list cp;
745
746 cp.bdaddr_type = bdaddr_type;
747 bacpy(&cp.bdaddr, bdaddr);
748
749 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
750 cp.bdaddr_type);
751 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
752
753 if (use_ll_privacy(req->hdev) &&
754 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
755 struct smp_irk *irk;
756
757 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
758 if (irk) {
759 struct hci_cp_le_del_from_resolv_list cp;
760
761 cp.bdaddr_type = bdaddr_type;
762 bacpy(&cp.bdaddr, bdaddr);
763
764 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
765 sizeof(cp), &cp);
766 }
767 }
768 }
769
770 /* Adds connection to accept list if needed. On error, returns -1. */
771 static int add_to_accept_list(struct hci_request *req,
772 struct hci_conn_params *params, u8 *num_entries,
773 bool allow_rpa)
774 {
775 struct hci_cp_le_add_to_accept_list cp;
776 struct hci_dev *hdev = req->hdev;
777
778 /* Already in accept list */
779 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr,
780 params->addr_type))
781 return 0;
782
783 /* Accept list full: fall back to a filter policy that accepts all advertising */
784 if (*num_entries >= hdev->le_accept_list_size)
785 return -1;
786
787 /* Accept list can not be used with RPAs */
788 if (!allow_rpa &&
789 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
790 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
791 return -1;
792 }
793
794 /* During suspend, only wakeable devices can be in accept list */
795 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
796 params->current_flags))
797 return 0;
798
799 *num_entries += 1;
800 cp.bdaddr_type = params->addr_type;
801 bacpy(&cp.bdaddr, ¶ms->addr);
802
803 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
804 cp.bdaddr_type);
805 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
806
807 if (use_ll_privacy(hdev) &&
808 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
809 struct smp_irk *irk;
810
811 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
812 params->addr_type);
813 if (irk) {
814 struct hci_cp_le_add_to_resolv_list cp;
815
816 cp.bdaddr_type = params->addr_type;
817 bacpy(&cp.bdaddr, ¶ms->addr);
818 memcpy(cp.peer_irk, irk->val, 16);
819
820 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
821 memcpy(cp.local_irk, hdev->irk, 16);
822 else
823 memset(cp.local_irk, 0, 16);
824
825 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
826 sizeof(cp), &cp);
827 }
828 }
829
830 return 0;
831 }
832
833 static u8 update_accept_list(struct hci_request *req)
834 {
835 struct hci_dev *hdev = req->hdev;
836 struct hci_conn_params *params;
837 struct bdaddr_list *b;
838 u8 num_entries = 0;
839 bool pend_conn, pend_report;
840 /* We allow usage of accept list even with RPAs in suspend. In the worst
841 * case, we won't be able to wake from devices that use the privacy 1.2
842 * features. Additionally, once we support privacy 1.2 and IRK
843 * offloading, we can update this to also check for those conditions.
844 */
845 bool allow_rpa = hdev->suspended;
846
847 if (use_ll_privacy(hdev) &&
848 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
849 allow_rpa = true;
850
851 /* Go through the current accept list programmed into the
852 * controller one by one and check if that address is still
853 * in the list of pending connections or list of devices to
854 * report. If not present in either list, then queue the
855 * command to remove it from the controller.
856 */
857 list_for_each_entry(b, &hdev->le_accept_list, list) {
858 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
859 &b->bdaddr,
860 b->bdaddr_type);
861 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
862 &b->bdaddr,
863 b->bdaddr_type);
864
865 /* If the device is not likely to connect or report,
866 * remove it from the accept list.
867 */
868 if (!pend_conn && !pend_report) {
869 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
870 continue;
871 }
872
873 /* Accept list can not be used with RPAs */
874 if (!allow_rpa &&
875 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
876 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
877 return 0x00;
878 }
879
880 num_entries++;
881 }
882
883 /* Since all no longer valid accept list entries have been
884 * removed, walk through the list of pending connections
885 * and ensure that any new device gets programmed into
886 * the controller.
887 *
888 * If the list of devices is larger than the number of
889 * available accept list entries in the controller, then
890 * just abort and return a filter policy value to not use the
891 * accept list.
892 */
893 list_for_each_entry(params, &hdev->pend_le_conns, action) {
894 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
895 return 0x00;
896 }
897
898 /* After adding all new pending connections, walk through
899 * the list of pending reports and also add these to the
900 * accept list if there is still space. Abort if space runs out.
901 */
902 list_for_each_entry(params, &hdev->pend_le_reports, action) {
903 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
904 return 0x00;
905 }
906
907 /* Use the allowlist unless the following conditions are all true:
908 * - We are not currently suspending
909 * - There are 1 or more ADV monitors registered
910 * - Interleaved scanning is not currently using the allowlist
911 *
912 * Once controller offloading of advertisement monitoring is in
913 * place, the above condition should also check for support of the
914 * MSFT extension.
915 */
916 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
917 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
918 return 0x00;
919
920 /* Select filter policy to use accept list */
921 return 0x01;
922 }
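
/* Note (added for clarity): the value returned above is used directly as the
 * LE scan filter_policy: 0x00 accepts all advertisements, 0x01 uses the
 * accept list.  hci_req_add_le_passive_scan() may later OR in 0x02 to select
 * the extended policies that also accept directed advertising to a
 * resolvable private address.
 */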
923
924 static bool scan_use_rpa(struct hci_dev *hdev)
925 {
926 return hci_dev_test_flag(hdev, HCI_PRIVACY);
927 }
928
929 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
930 u16 window, u8 own_addr_type, u8 filter_policy,
931 bool addr_resolv)
932 {
933 struct hci_dev *hdev = req->hdev;
934
935 if (hdev->scanning_paused) {
936 bt_dev_dbg(hdev, "Scanning is paused for suspend");
937 return;
938 }
939
940 if (use_ll_privacy(hdev) &&
941 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
942 addr_resolv) {
943 u8 enable = 0x01;
944
945 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
946 }
947
948 /* Use extended scanning if the Set Extended Scan Parameters and
949 * Set Extended Scan Enable commands are supported
950 */
951 if (use_ext_scan(hdev)) {
952 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
953 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
954 struct hci_cp_le_scan_phy_params *phy_params;
955 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
956 u32 plen;
957
958 ext_param_cp = (void *)data;
959 phy_params = (void *)ext_param_cp->data;
960
961 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
962 ext_param_cp->own_addr_type = own_addr_type;
963 ext_param_cp->filter_policy = filter_policy;
964
965 plen = sizeof(*ext_param_cp);
966
967 if (scan_1m(hdev) || scan_2m(hdev)) {
968 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
969
970 memset(phy_params, 0, sizeof(*phy_params));
971 phy_params->type = type;
972 phy_params->interval = cpu_to_le16(interval);
973 phy_params->window = cpu_to_le16(window);
974
975 plen += sizeof(*phy_params);
976 phy_params++;
977 }
978
979 if (scan_coded(hdev)) {
980 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
981
982 memset(phy_params, 0, sizeof(*phy_params));
983 phy_params->type = type;
984 phy_params->interval = cpu_to_le16(interval);
985 phy_params->window = cpu_to_le16(window);
986
987 plen += sizeof(*phy_params);
988 phy_params++;
989 }
990
991 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
992 plen, ext_param_cp);
993
994 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
995 ext_enable_cp.enable = LE_SCAN_ENABLE;
996 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
997
998 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
999 sizeof(ext_enable_cp), &ext_enable_cp);
1000 } else {
1001 struct hci_cp_le_set_scan_param param_cp;
1002 struct hci_cp_le_set_scan_enable enable_cp;
1003
1004 memset(¶m_cp, 0, sizeof(param_cp));
1005 param_cp.type = type;
1006 param_cp.interval = cpu_to_le16(interval);
1007 param_cp.window = cpu_to_le16(window);
1008 param_cp.own_address_type = own_addr_type;
1009 param_cp.filter_policy = filter_policy;
1010 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1011 ¶m_cp);
1012
1013 memset(&enable_cp, 0, sizeof(enable_cp));
1014 enable_cp.enable = LE_SCAN_ENABLE;
1015 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1016 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1017 &enable_cp);
1018 }
1019 }
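
/* Note (added for clarity): the interval and window passed in here are in
 * units of 0.625 ms, as defined for the LE Set Scan Parameters command, so
 * an interval of 0x0060 (96), for example, corresponds to 60 ms.
 */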
1020
1021 /* Returns true if an LE connection is in the scanning state */
1022 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1023 {
1024 struct hci_conn_hash *h = &hdev->conn_hash;
1025 struct hci_conn *c;
1026
1027 rcu_read_lock();
1028
1029 list_for_each_entry_rcu(c, &h->list, list) {
1030 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1031 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1032 rcu_read_unlock();
1033 return true;
1034 }
1035 }
1036
1037 rcu_read_unlock();
1038
1039 return false;
1040 }
1041
1042 /* Call hci_req_add_le_scan_disable() first to disable controller-based
1043 * address resolution so that the resolving list can be
1044 * reconfigured.
1045 */
1046 void hci_req_add_le_passive_scan(struct hci_request *req)
1047 {
1048 struct hci_dev *hdev = req->hdev;
1049 u8 own_addr_type;
1050 u8 filter_policy;
1051 u16 window, interval;
1052 /* Background scanning should run with address resolution */
1053 bool addr_resolv = true;
1054
1055 if (hdev->scanning_paused) {
1056 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1057 return;
1058 }
1059
1060 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
1061 * during passive scanning. Not using a non-resolvable address
1062 * here is important so that peer devices using direct
1063 * advertising with our address will be correctly reported
1064 * by the controller.
1065 */
1066 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1067 &own_addr_type))
1068 return;
1069
1070 if (__hci_update_interleaved_scan(hdev))
1071 return;
1072
1073 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1074 /* Adding or removing entries from the accept list must
1075 * happen before enabling scanning. The controller does
1076 * not allow accept list modification while scanning.
1077 */
1078 filter_policy = update_accept_list(req);
1079
1080 /* When the controller is using resolvable random addresses and
1081 * LE privacy is enabled, controllers that support the Extended
1082 * Scanner Filter Policies can also enable support for handling
1083 * directed advertising.
1084 *
1085 * So instead of using filter policies 0x00 (no accept list)
1086 * and 0x01 (accept list enabled) use the new filter policies
1087 * 0x02 (no accept list) and 0x03 (accept list enabled).
1088 */
1089 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1090 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1091 filter_policy |= 0x02;
1092
1093 if (hdev->suspended) {
1094 window = hdev->le_scan_window_suspend;
1095 interval = hdev->le_scan_int_suspend;
1096 } else if (hci_is_le_conn_scanning(hdev)) {
1097 window = hdev->le_scan_window_connect;
1098 interval = hdev->le_scan_int_connect;
1099 } else if (hci_is_adv_monitoring(hdev)) {
1100 window = hdev->le_scan_window_adv_monitor;
1101 interval = hdev->le_scan_int_adv_monitor;
1102 } else {
1103 window = hdev->le_scan_window;
1104 interval = hdev->le_scan_interval;
1105 }
1106
1107 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1108 filter_policy);
1109 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1110 own_addr_type, filter_policy, addr_resolv);
1111 }
1112
1113 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1114 {
1115 struct adv_info *adv_instance;
1116
1117 /* Instance 0x00 always sets the local name */
1118 if (instance == 0x00)
1119 return 1;
1120
1121 adv_instance = hci_find_adv_instance(hdev, instance);
1122 if (!adv_instance)
1123 return 0;
1124
1125 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1126 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1127 return 1;
1128
1129 return adv_instance->scan_rsp_len;
1130 }
1131
1132 static void hci_req_clear_event_filter(struct hci_request *req)
1133 {
1134 struct hci_cp_set_event_filter f;
1135
1136 memset(&f, 0, sizeof(f));
1137 f.flt_type = HCI_FLT_CLEAR_ALL;
1138 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1139
1140 /* Update page scan state (since we may have modified it when setting
1141 * the event filter).
1142 */
1143 __hci_req_update_scan(req);
1144 }
1145
1146 static void hci_req_set_event_filter(struct hci_request *req)
1147 {
1148 struct bdaddr_list_with_flags *b;
1149 struct hci_cp_set_event_filter f;
1150 struct hci_dev *hdev = req->hdev;
1151 u8 scan = SCAN_DISABLED;
1152
1153 /* Always clear event filter when starting */
1154 hci_req_clear_event_filter(req);
1155
1156 list_for_each_entry(b, &hdev->accept_list, list) {
1157 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1158 b->current_flags))
1159 continue;
1160
1161 memset(&f, 0, sizeof(f));
1162 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1163 f.flt_type = HCI_FLT_CONN_SETUP;
1164 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1165 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1166
1167 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1168 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1169 scan = SCAN_PAGE;
1170 }
1171
1172 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1173 }
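
/* Note (added for clarity): scan stays SCAN_DISABLED unless at least one
 * accept list entry is flagged for remote wakeup; in that case page scanning
 * is left enabled so those devices can reconnect and wake the host while it
 * is suspended.
 */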
1174
1175 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1176 {
1177 /* Before changing params disable scan if enabled */
1178 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1179 hci_req_add_le_scan_disable(req, false);
1180
1181 /* Configure params and enable scanning */
1182 hci_req_add_le_passive_scan(req);
1183
1184 /* Block suspend notifier on response */
1185 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1186 }
1187
1188 static void cancel_adv_timeout(struct hci_dev *hdev)
1189 {
1190 if (hdev->adv_instance_timeout) {
1191 hdev->adv_instance_timeout = 0;
1192 cancel_delayed_work(&hdev->adv_instance_expire);
1193 }
1194 }
1195
1196 /* This function requires the caller holds hdev->lock */
1197 static void hci_suspend_adv_instances(struct hci_request *req)
1198 {
1199 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1200
1201 /* Call to disable any advertisements active on the controller.
1202 * This will succeed even if no advertisements are configured.
1203 */
1204 __hci_req_disable_advertising(req);
1205
1206 /* If we are using software rotation, pause the loop */
1207 if (!ext_adv_capable(req->hdev))
1208 cancel_adv_timeout(req->hdev);
1209 }
1210
1211 /* This function requires the caller holds hdev->lock */
1212 static void hci_resume_adv_instances(struct hci_request *req)
1213 {
1214 struct adv_info *adv;
1215
1216 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1217
1218 if (ext_adv_capable(req->hdev)) {
1219 /* Call for each tracked instance to be re-enabled */
1220 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1221 __hci_req_enable_ext_advertising(req,
1222 adv->instance);
1223 }
1224
1225 } else {
1226 /* Schedule for most recent instance to be restarted and begin
1227 * the software rotation loop
1228 */
1229 __hci_req_schedule_adv_instance(req,
1230 req->hdev->cur_adv_instance,
1231 true);
1232 }
1233 }
1234
1235 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1236 {
1237 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1238 status);
1239 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1240 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1241 wake_up(&hdev->suspend_wait_q);
1242 }
1243 }
1244
1245 /* Call with hci_dev_lock */
1246 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1247 {
1248 int old_state;
1249 struct hci_conn *conn;
1250 struct hci_request req;
1251 u8 page_scan;
1252 int disconnect_counter;
1253
1254 if (next == hdev->suspend_state) {
1255 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1256 goto done;
1257 }
1258
1259 hdev->suspend_state = next;
1260 hci_req_init(&req, hdev);
1261
1262 if (next == BT_SUSPEND_DISCONNECT) {
1263 /* Mark device as suspended */
1264 hdev->suspended = true;
1265
1266 /* Pause discovery if not already stopped */
1267 old_state = hdev->discovery.state;
1268 if (old_state != DISCOVERY_STOPPED) {
1269 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1270 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1271 queue_work(hdev->req_workqueue, &hdev->discov_update);
1272 }
1273
1274 hdev->discovery_paused = true;
1275 hdev->discovery_old_state = old_state;
1276
1277 /* Stop directed advertising */
1278 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1279 if (old_state) {
1280 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1281 cancel_delayed_work(&hdev->discov_off);
1282 queue_delayed_work(hdev->req_workqueue,
1283 &hdev->discov_off, 0);
1284 }
1285
1286 /* Pause other advertisements */
1287 if (hdev->adv_instance_cnt)
1288 hci_suspend_adv_instances(&req);
1289
1290 hdev->advertising_paused = true;
1291 hdev->advertising_old_state = old_state;
1292 /* Disable page scan */
1293 page_scan = SCAN_DISABLED;
1294 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1295
1296 /* Disable LE passive scan if enabled */
1297 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1298 hci_req_add_le_scan_disable(&req, false);
1299
1300 /* Mark task needing completion */
1301 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1302
1303 /* Prevent disconnects from causing scanning to be re-enabled */
1304 hdev->scanning_paused = true;
1305
1306 /* Run commands before disconnecting */
1307 hci_req_run(&req, suspend_req_complete);
1308
1309 disconnect_counter = 0;
1310 /* Soft disconnect everything (power off) */
1311 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1312 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1313 disconnect_counter++;
1314 }
1315
1316 if (disconnect_counter > 0) {
1317 bt_dev_dbg(hdev,
1318 "Had %d disconnects. Will wait on them",
1319 disconnect_counter);
1320 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1321 }
1322 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1323 /* Unpause to take care of updating scanning params */
1324 hdev->scanning_paused = false;
1325 /* Enable event filter for paired devices */
1326 hci_req_set_event_filter(&req);
1327 /* Enable passive scan at lower duty cycle */
1328 hci_req_config_le_suspend_scan(&req);
1329 /* Pause scan changes again. */
1330 hdev->scanning_paused = true;
1331 hci_req_run(&req, suspend_req_complete);
1332 } else {
1333 hdev->suspended = false;
1334 hdev->scanning_paused = false;
1335
1336 hci_req_clear_event_filter(&req);
1337 /* Reset passive/background scanning to normal */
1338 hci_req_config_le_suspend_scan(&req);
1339
1340 /* Unpause directed advertising */
1341 hdev->advertising_paused = false;
1342 if (hdev->advertising_old_state) {
1343 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1344 hdev->suspend_tasks);
1345 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1346 queue_work(hdev->req_workqueue,
1347 &hdev->discoverable_update);
1348 hdev->advertising_old_state = 0;
1349 }
1350
1351 /* Resume other advertisements */
1352 if (hdev->adv_instance_cnt)
1353 hci_resume_adv_instances(&req);
1354
1355 /* Unpause discovery */
1356 hdev->discovery_paused = false;
1357 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1358 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1359 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1360 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1361 queue_work(hdev->req_workqueue, &hdev->discov_update);
1362 }
1363
1364 hci_req_run(&req, suspend_req_complete);
1365 }
1366
1367 hdev->suspend_state = next;
1368
1369 done:
1370 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1371 wake_up(&hdev->suspend_wait_q);
1372 }
1373
1374 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1375 {
1376 u8 instance = hdev->cur_adv_instance;
1377 struct adv_info *adv_instance;
1378
1379 /* Instance 0x00 always sets the local name */
1380 if (instance == 0x00)
1381 return 1;
1382
1383 adv_instance = hci_find_adv_instance(hdev, instance);
1384 if (!adv_instance)
1385 return 0;
1386
1387 /* TODO: Take into account the "appearance" and "local-name" flags here.
1388 * These are currently being ignored as they are not supported.
1389 */
1390 return adv_instance->scan_rsp_len;
1391 }
1392
1393 void __hci_req_disable_advertising(struct hci_request *req)
1394 {
1395 if (ext_adv_capable(req->hdev)) {
1396 __hci_req_disable_ext_adv_instance(req, 0x00);
1397
1398 } else {
1399 u8 enable = 0x00;
1400
1401 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1402 }
1403 }
1404
1405 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1406 {
1407 u32 flags;
1408 struct adv_info *adv_instance;
1409
1410 if (instance == 0x00) {
1411 /* Instance 0 always manages the "Tx Power" and "Flags"
1412 * fields
1413 */
1414 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1415
1416 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1417 * corresponds to the "connectable" instance flag.
1418 */
1419 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1420 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1421
1422 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1423 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1424 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1425 flags |= MGMT_ADV_FLAG_DISCOV;
1426
1427 return flags;
1428 }
1429
1430 adv_instance = hci_find_adv_instance(hdev, instance);
1431
1432 /* Return 0 when we got an invalid instance identifier. */
1433 if (!adv_instance)
1434 return 0;
1435
1436 return adv_instance->flags;
1437 }
1438
1439 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1440 {
1441 /* If privacy is not enabled don't use RPA */
1442 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1443 return false;
1444
1445 /* If basic privacy mode is enabled use RPA */
1446 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1447 return true;
1448
1449 /* If limited privacy mode is enabled don't use RPA if we're
1450 * both discoverable and bondable.
1451 */
1452 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1453 hci_dev_test_flag(hdev, HCI_BONDABLE))
1454 return false;
1455
1456 /* We're neither bondable nor discoverable in the limited
1457 * privacy mode, therefore use RPA.
1458 */
1459 return true;
1460 }
1461
1462 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1463 {
1464 /* If there is no connection we are OK to advertise. */
1465 if (hci_conn_num(hdev, LE_LINK) == 0)
1466 return true;
1467
1468 /* Check le_states if there is any connection in slave role. */
1469 if (hdev->conn_hash.le_num_slave > 0) {
1470 /* Slave connection state and non connectable mode bit 20. */
1471 if (!connectable && !(hdev->le_states[2] & 0x10))
1472 return false;
1473
1474 /* Slave connection state and connectable mode bit 38
1475 * and scannable bit 21.
1476 */
1477 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1478 !(hdev->le_states[2] & 0x20)))
1479 return false;
1480 }
1481
1482 /* Check le_states if there is any connection in master role. */
1483 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1484 /* Master connection state and non connectable mode bit 18. */
1485 if (!connectable && !(hdev->le_states[2] & 0x02))
1486 return false;
1487
1488 /* Master connection state and connectable mode bit 35 and
1489 * scannable 19.
1490 */
1491 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1492 !(hdev->le_states[2] & 0x08)))
1493 return false;
1494 }
1495
1496 return true;
1497 }
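
/* Note (added for clarity): the masks above index the "LE supported states"
 * bit field byte-wise: bit N lives in le_states[N / 8] under mask
 * BIT(N % 8).  For example bit 20 maps to le_states[2] & 0x10 and bit 38
 * maps to le_states[4] & 0x40, matching the comments in the function.
 */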
1498
1499 void __hci_req_enable_advertising(struct hci_request *req)
1500 {
1501 struct hci_dev *hdev = req->hdev;
1502 struct hci_cp_le_set_adv_param cp;
1503 u8 own_addr_type, enable = 0x01;
1504 bool connectable;
1505 u16 adv_min_interval, adv_max_interval;
1506 u32 flags;
1507
1508 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1509
1510 /* If the "connectable" instance flag was not set, then choose between
1511 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1512 */
1513 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1514 mgmt_get_connectable(hdev);
1515
1516 if (!is_advertising_allowed(hdev, connectable))
1517 return;
1518
1519 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1520 __hci_req_disable_advertising(req);
1521
1522 /* Clear the HCI_LE_ADV bit temporarily so that the
1523 * hci_update_random_address knows that it's safe to go ahead
1524 * and write a new random address. The flag will be set back on
1525 * as soon as the SET_ADV_ENABLE HCI command completes.
1526 */
1527 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1528
1529 /* Set require_privacy to true only when non-connectable
1530 * advertising is used. In that case it is fine to use a
1531 * non-resolvable private address.
1532 */
1533 if (hci_update_random_address(req, !connectable,
1534 adv_use_rpa(hdev, flags),
1535 &own_addr_type) < 0)
1536 return;
1537
1538 memset(&cp, 0, sizeof(cp));
1539
1540 if (connectable) {
1541 cp.type = LE_ADV_IND;
1542
1543 adv_min_interval = hdev->le_adv_min_interval;
1544 adv_max_interval = hdev->le_adv_max_interval;
1545 } else {
1546 if (get_cur_adv_instance_scan_rsp_len(hdev))
1547 cp.type = LE_ADV_SCAN_IND;
1548 else
1549 cp.type = LE_ADV_NONCONN_IND;
1550
1551 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1552 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1553 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1554 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1555 } else {
1556 adv_min_interval = hdev->le_adv_min_interval;
1557 adv_max_interval = hdev->le_adv_max_interval;
1558 }
1559 }
1560
1561 cp.min_interval = cpu_to_le16(adv_min_interval);
1562 cp.max_interval = cpu_to_le16(adv_max_interval);
1563 cp.own_address_type = own_addr_type;
1564 cp.channel_map = hdev->le_adv_channel_map;
1565
1566 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1567
1568 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1569 }
1570
1571 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1572 {
1573 size_t short_len;
1574 size_t complete_len;
1575
1576 /* no space left for name (+ NULL + type + len) */
1577 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1578 return ad_len;
1579
1580 /* use complete name if present and fits */
1581 complete_len = strlen(hdev->dev_name);
1582 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1583 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1584 hdev->dev_name, complete_len + 1);
1585
1586 /* use short name if present */
1587 short_len = strlen(hdev->short_name);
1588 if (short_len)
1589 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1590 hdev->short_name, short_len + 1);
1591
1592 /* use shortened full name if present, we already know that name
1593 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1594 */
1595 if (complete_len) {
1596 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1597
1598 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1599 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1600
1601 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1602 sizeof(name));
1603 }
1604
1605 return ad_len;
1606 }
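
/* Note (added for clarity): with the standard definitions of
 * HCI_MAX_AD_LENGTH (31) and HCI_MAX_SHORT_NAME_LENGTH (10), the early
 * return above means no name is appended once ad_len exceeds 18, since even
 * a shortened name needs at least 13 bytes of remaining space.
 */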
1607
1608 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1609 {
1610 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1611 }
1612
1613 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1614 {
1615 u8 scan_rsp_len = 0;
1616
1617 if (hdev->appearance) {
1618 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1619 }
1620
1621 return append_local_name(hdev, ptr, scan_rsp_len);
1622 }
1623
1624 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1625 u8 *ptr)
1626 {
1627 struct adv_info *adv_instance;
1628 u32 instance_flags;
1629 u8 scan_rsp_len = 0;
1630
1631 adv_instance = hci_find_adv_instance(hdev, instance);
1632 if (!adv_instance)
1633 return 0;
1634
1635 instance_flags = adv_instance->flags;
1636
1637 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1638 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1639 }
1640
1641 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1642 adv_instance->scan_rsp_len);
1643
1644 scan_rsp_len += adv_instance->scan_rsp_len;
1645
1646 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1647 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1648
1649 return scan_rsp_len;
1650 }
1651
1652 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1653 {
1654 struct hci_dev *hdev = req->hdev;
1655 u8 len;
1656
1657 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1658 return;
1659
1660 if (ext_adv_capable(hdev)) {
1661 struct {
1662 struct hci_cp_le_set_ext_scan_rsp_data cp;
1663 u8 data[HCI_MAX_EXT_AD_LENGTH];
1664 } pdu;
1665
1666 memset(&pdu, 0, sizeof(pdu));
1667
1668 if (instance)
1669 len = create_instance_scan_rsp_data(hdev, instance,
1670 pdu.data);
1671 else
1672 len = create_default_scan_rsp_data(hdev, pdu.data);
1673
1674 if (hdev->scan_rsp_data_len == len &&
1675 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1676 return;
1677
1678 memcpy(hdev->scan_rsp_data, pdu.data, len);
1679 hdev->scan_rsp_data_len = len;
1680
1681 pdu.cp.handle = instance;
1682 pdu.cp.length = len;
1683 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1684 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1685
1686 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1687 sizeof(pdu.cp) + len, &pdu.cp);
1688 } else {
1689 struct hci_cp_le_set_scan_rsp_data cp;
1690
1691 memset(&cp, 0, sizeof(cp));
1692
1693 if (instance)
1694 len = create_instance_scan_rsp_data(hdev, instance,
1695 cp.data);
1696 else
1697 len = create_default_scan_rsp_data(hdev, cp.data);
1698
1699 if (hdev->scan_rsp_data_len == len &&
1700 !memcmp(cp.data, hdev->scan_rsp_data, len))
1701 return;
1702
1703 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1704 hdev->scan_rsp_data_len = len;
1705
1706 cp.length = len;
1707
1708 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1709 }
1710 }
1711
1712 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1713 {
1714 struct adv_info *adv_instance = NULL;
1715 u8 ad_len = 0, flags = 0;
1716 u32 instance_flags;
1717
1718 /* Return 0 when the current instance identifier is invalid. */
1719 if (instance) {
1720 adv_instance = hci_find_adv_instance(hdev, instance);
1721 if (!adv_instance)
1722 return 0;
1723 }
1724
1725 instance_flags = get_adv_instance_flags(hdev, instance);
1726
1727 /* If instance already has the flags set skip adding it once
1728 * again.
1729 */
1730 if (adv_instance && eir_get_data(adv_instance->adv_data,
1731 adv_instance->adv_data_len, EIR_FLAGS,
1732 NULL))
1733 goto skip_flags;
1734
1735 /* The Add Advertising command allows userspace to set both the general
1736 * and limited discoverable flags.
1737 */
1738 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1739 flags |= LE_AD_GENERAL;
1740
1741 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1742 flags |= LE_AD_LIMITED;
1743
1744 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1745 flags |= LE_AD_NO_BREDR;
1746
1747 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1748 /* If a discovery flag wasn't provided, simply use the global
1749 * settings.
1750 */
1751 if (!flags)
1752 flags |= mgmt_get_adv_discov_flags(hdev);
1753
1754 /* If flags would still be empty, then there is no need to
1755 * include the "Flags" AD field.
1756 */
1757 if (flags) {
1758 ptr[0] = 0x02;
1759 ptr[1] = EIR_FLAGS;
1760 ptr[2] = flags;
1761
1762 ad_len += 3;
1763 ptr += 3;
1764 }
1765 }
1766
1767 skip_flags:
1768 if (adv_instance) {
1769 memcpy(ptr, adv_instance->adv_data,
1770 adv_instance->adv_data_len);
1771 ad_len += adv_instance->adv_data_len;
1772 ptr += adv_instance->adv_data_len;
1773 }
1774
1775 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1776 s8 adv_tx_power;
1777
1778 if (ext_adv_capable(hdev)) {
1779 if (adv_instance)
1780 adv_tx_power = adv_instance->tx_power;
1781 else
1782 adv_tx_power = hdev->adv_tx_power;
1783 } else {
1784 adv_tx_power = hdev->adv_tx_power;
1785 }
1786
1787 /* Provide Tx Power only if we can provide a valid value for it */
1788 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1789 ptr[0] = 0x02;
1790 ptr[1] = EIR_TX_POWER;
1791 ptr[2] = (u8)adv_tx_power;
1792
1793 ad_len += 3;
1794 ptr += 3;
1795 }
1796 }
1797
1798 return ad_len;
1799 }
1800
1801 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1802 {
1803 struct hci_dev *hdev = req->hdev;
1804 u8 len;
1805
1806 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1807 return;
1808
1809 if (ext_adv_capable(hdev)) {
1810 struct {
1811 struct hci_cp_le_set_ext_adv_data cp;
1812 u8 data[HCI_MAX_EXT_AD_LENGTH];
1813 } pdu;
1814
1815 memset(&pdu, 0, sizeof(pdu));
1816
1817 len = create_instance_adv_data(hdev, instance, pdu.data);
1818
1819 /* There's nothing to do if the data hasn't changed */
1820 if (hdev->adv_data_len == len &&
1821 memcmp(pdu.data, hdev->adv_data, len) == 0)
1822 return;
1823
1824 memcpy(hdev->adv_data, pdu.data, len);
1825 hdev->adv_data_len = len;
1826
1827 pdu.cp.length = len;
1828 pdu.cp.handle = instance;
1829 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1830 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1831
1832 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1833 sizeof(pdu.cp) + len, &pdu.cp);
1834 } else {
1835 struct hci_cp_le_set_adv_data cp;
1836
1837 memset(&cp, 0, sizeof(cp));
1838
1839 len = create_instance_adv_data(hdev, instance, cp.data);
1840
1841 /* There's nothing to do if the data hasn't changed */
1842 if (hdev->adv_data_len == len &&
1843 memcmp(cp.data, hdev->adv_data, len) == 0)
1844 return;
1845
1846 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1847 hdev->adv_data_len = len;
1848
1849 cp.length = len;
1850
1851 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1852 }
1853 }
1854
1855 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1856 {
1857 struct hci_request req;
1858
1859 hci_req_init(&req, hdev);
1860 __hci_req_update_adv_data(&req, instance);
1861
1862 return hci_req_run(&req, NULL);
1863 }
1864
1865 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1866 u16 opcode)
1867 {
1868 BT_DBG("%s status %u", hdev->name, status);
1869 }
1870
1871 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1872 {
1873 struct hci_request req;
1874 __u8 enable = 0x00;
1875
1876 if (!use_ll_privacy(hdev) &&
1877 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1878 return;
1879
1880 hci_req_init(&req, hdev);
1881
1882 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1883
1884 hci_req_run(&req, enable_addr_resolution_complete);
1885 }
1886
1887 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1888 {
1889 BT_DBG("%s status %u", hdev->name, status);
1890 }
1891
1892 void hci_req_reenable_advertising(struct hci_dev *hdev)
1893 {
1894 struct hci_request req;
1895
1896 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1897 list_empty(&hdev->adv_instances))
1898 return;
1899
1900 hci_req_init(&req, hdev);
1901
1902 if (hdev->cur_adv_instance) {
1903 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1904 true);
1905 } else {
1906 if (ext_adv_capable(hdev)) {
1907 __hci_req_start_ext_adv(&req, 0x00);
1908 } else {
1909 __hci_req_update_adv_data(&req, 0x00);
1910 __hci_req_update_scan_rsp_data(&req, 0x00);
1911 __hci_req_enable_advertising(&req);
1912 }
1913 }
1914
1915 hci_req_run(&req, adv_enable_complete);
1916 }
1917
1918 static void adv_timeout_expire(struct work_struct *work)
1919 {
1920 struct hci_dev *hdev = container_of(work, struct hci_dev,
1921 adv_instance_expire.work);
1922
1923 struct hci_request req;
1924 u8 instance;
1925
1926 BT_DBG("%s", hdev->name);
1927
1928 hci_dev_lock(hdev);
1929
1930 hdev->adv_instance_timeout = 0;
1931
1932 instance = hdev->cur_adv_instance;
1933 if (instance == 0x00)
1934 goto unlock;
1935
1936 hci_req_init(&req, hdev);
1937
1938 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1939
1940 if (list_empty(&hdev->adv_instances))
1941 __hci_req_disable_advertising(&req);
1942
1943 hci_req_run(&req, NULL);
1944
1945 unlock:
1946 hci_dev_unlock(hdev);
1947 }
1948
1949 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1950 unsigned long opt)
1951 {
1952 struct hci_dev *hdev = req->hdev;
1953 int ret = 0;
1954
1955 hci_dev_lock(hdev);
1956
1957 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1958 hci_req_add_le_scan_disable(req, false);
1959 hci_req_add_le_passive_scan(req);
1960
1961 switch (hdev->interleave_scan_state) {
1962 case INTERLEAVE_SCAN_ALLOWLIST:
1963 bt_dev_dbg(hdev, "next state: allowlist");
1964 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1965 break;
1966 case INTERLEAVE_SCAN_NO_FILTER:
1967 bt_dev_dbg(hdev, "next state: no filter");
1968 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1969 break;
1970 case INTERLEAVE_SCAN_NONE:
1971 BT_ERR("unexpected error");
1972 ret = -1;
1973 }
1974
1975 hci_dev_unlock(hdev);
1976
1977 return ret;
1978 }
1979
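/* Work item driving interleaved scanning: it re-programs the passive scan
 * via hci_req_sync() and, as long as interleaved scanning has not been
 * cancelled, re-queues itself after the duration configured for the
 * current phase (allowlist or no filter).
 */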
1980 static void interleave_scan_work(struct work_struct *work)
1981 {
1982 struct hci_dev *hdev = container_of(work, struct hci_dev,
1983 interleave_scan.work);
1984 u8 status;
1985 unsigned long timeout;
1986
1987 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1988 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1989 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1990 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1991 } else {
1992 bt_dev_err(hdev, "unexpected error");
1993 return;
1994 }
1995
1996 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1997 HCI_CMD_TIMEOUT, &status);
1998
1999 /* Don't continue interleaving if it was canceled */
2000 if (is_interleave_scanning(hdev))
2001 queue_delayed_work(hdev->req_workqueue,
2002 &hdev->interleave_scan, timeout);
2003 }
2004
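/* Select the own address type and, where applicable, the random address
 * to use for advertising. Depending on the privacy requirements this is
 * a (possibly freshly generated) RPA, a non-resolvable private address
 * or the public address. Returns 0 on success or a negative error if a
 * new RPA could not be generated.
 */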
2005 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2006 bool use_rpa, struct adv_info *adv_instance,
2007 u8 *own_addr_type, bdaddr_t *rand_addr)
2008 {
2009 int err;
2010
2011 bacpy(rand_addr, BDADDR_ANY);
2012
2013 /* If privacy is enabled use a resolvable private address. If
2014 * current RPA has expired then generate a new one.
2015 */
2016 if (use_rpa) {
2017 int to;
2018
2019 /* If the controller supports LL Privacy, use own address type
2020 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2021 */
2022 if (use_ll_privacy(hdev))
2023 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2024 else
2025 *own_addr_type = ADDR_LE_DEV_RANDOM;
2026
2027 if (adv_instance) {
2028 if (!adv_instance->rpa_expired &&
2029 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2030 return 0;
2031
2032 adv_instance->rpa_expired = false;
2033 } else {
2034 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2035 !bacmp(&hdev->random_addr, &hdev->rpa))
2036 return 0;
2037 }
2038
2039 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2040 if (err < 0) {
2041 bt_dev_err(hdev, "failed to generate new RPA");
2042 return err;
2043 }
2044
2045 bacpy(rand_addr, &hdev->rpa);
2046
2047 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2048 if (adv_instance)
2049 queue_delayed_work(hdev->workqueue,
2050 &adv_instance->rpa_expired_cb, to);
2051 else
2052 queue_delayed_work(hdev->workqueue,
2053 &hdev->rpa_expired, to);
2054
2055 return 0;
2056 }
2057
2058 /* In case of required privacy without resolvable private address,
2059 * use a non-resolvable private address. This is useful for
2060 * non-connectable advertising.
2061 */
2062 if (require_privacy) {
2063 bdaddr_t nrpa;
2064
2065 while (true) {
2066 /* The non-resolvable private address is generated
2067 * from six random bytes with the two most significant
2068 * bits cleared.
2069 */
2070 get_random_bytes(&nrpa, 6);
2071 nrpa.b[5] &= 0x3f;
2072
2073 /* The non-resolvable private address shall not be
2074 * equal to the public address.
2075 */
2076 if (bacmp(&hdev->bdaddr, &nrpa))
2077 break;
2078 }
2079
2080 *own_addr_type = ADDR_LE_DEV_RANDOM;
2081 bacpy(rand_addr, &nrpa);
2082
2083 return 0;
2084 }
2085
2086 /* No privacy so use a public address. */
2087 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2088
2089 return 0;
2090 }
2091
2092 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2093 {
2094 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2095 }
2096
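/* Queue the LE Set Extended Advertising Parameters command for the given
 * instance and, when the advertising set needs a new random address, the
 * LE Set Advertising Set Random Address command as well.
 */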
2097 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2098 {
2099 struct hci_cp_le_set_ext_adv_params cp;
2100 struct hci_dev *hdev = req->hdev;
2101 bool connectable;
2102 u32 flags;
2103 bdaddr_t random_addr;
2104 u8 own_addr_type;
2105 int err;
2106 struct adv_info *adv_instance;
2107 bool secondary_adv;
2108
2109 if (instance > 0) {
2110 adv_instance = hci_find_adv_instance(hdev, instance);
2111 if (!adv_instance)
2112 return -EINVAL;
2113 } else {
2114 adv_instance = NULL;
2115 }
2116
2117 flags = get_adv_instance_flags(hdev, instance);
2118
2119 /* If the "connectable" instance flag was not set, then choose between
2120 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2121 */
2122 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2123 mgmt_get_connectable(hdev);
2124
2125 if (!is_advertising_allowed(hdev, connectable))
2126 return -EPERM;
2127
2128 /* Set require_privacy to true only when non-connectable
2129 * advertising is used. In that case it is fine to use a
2130 * non-resolvable private address.
2131 */
2132 err = hci_get_random_address(hdev, !connectable,
2133 adv_use_rpa(hdev, flags), adv_instance,
2134 &own_addr_type, &random_addr);
2135 if (err < 0)
2136 return err;
2137
2138 memset(&cp, 0, sizeof(cp));
2139
2140 /* In the extended adv parameters the interval is 3 octets */
2141 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2142 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2143
2144 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2145
2146 if (connectable) {
2147 if (secondary_adv)
2148 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2149 else
2150 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2151 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2152 if (secondary_adv)
2153 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2154 else
2155 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2156 } else {
2157 if (secondary_adv)
2158 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2159 else
2160 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2161 }
2162
2163 cp.own_addr_type = own_addr_type;
2164 cp.channel_map = hdev->le_adv_channel_map;
2165 cp.tx_power = 127;
2166 cp.handle = instance;
2167
2168 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2169 cp.primary_phy = HCI_ADV_PHY_1M;
2170 cp.secondary_phy = HCI_ADV_PHY_2M;
2171 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2172 cp.primary_phy = HCI_ADV_PHY_CODED;
2173 cp.secondary_phy = HCI_ADV_PHY_CODED;
2174 } else {
2175 /* In all other cases use 1M */
2176 cp.primary_phy = HCI_ADV_PHY_1M;
2177 cp.secondary_phy = HCI_ADV_PHY_1M;
2178 }
2179
2180 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2181
2182 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2183 bacmp(&random_addr, BDADDR_ANY)) {
2184 struct hci_cp_le_set_adv_set_rand_addr cp;
2185
2186 /* Check if the random address needs to be updated */
2187 if (adv_instance) {
2188 if (!bacmp(&random_addr, &adv_instance->random_addr))
2189 return 0;
2190 } else {
2191 if (!bacmp(&random_addr, &hdev->random_addr))
2192 return 0;
2193 }
2194
2195 memset(&cp, 0, sizeof(cp));
2196
2197 cp.handle = instance;
2198 bacpy(&cp.bdaddr, &random_addr);
2199
2200 hci_req_add(req,
2201 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2202 sizeof(cp), &cp);
2203 }
2204
2205 return 0;
2206 }
2207
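/* Queue the LE Set Extended Advertising Enable command for a single
 * advertising set, passing the per-instance duration so the controller
 * can handle the scheduling itself.
 */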
2208 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2209 {
2210 struct hci_dev *hdev = req->hdev;
2211 struct hci_cp_le_set_ext_adv_enable *cp;
2212 struct hci_cp_ext_adv_set *adv_set;
2213 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2214 struct adv_info *adv_instance;
2215
2216 if (instance > 0) {
2217 adv_instance = hci_find_adv_instance(hdev, instance);
2218 if (!adv_instance)
2219 return -EINVAL;
2220 } else {
2221 adv_instance = NULL;
2222 }
2223
2224 cp = (void *) data;
2225 adv_set = (void *) cp->data;
2226
2227 memset(cp, 0, sizeof(*cp));
2228
2229 cp->enable = 0x01;
2230 cp->num_of_sets = 0x01;
2231
2232 memset(adv_set, 0, sizeof(*adv_set));
2233
2234 adv_set->handle = instance;
2235
2236 /* Set duration per instance since controller is responsible for
2237 * scheduling it.
2238 */
2239 if (adv_instance && adv_instance->timeout) {
2240 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2241
2242 /* Time = N * 10 ms */
2243 adv_set->duration = cpu_to_le16(duration / 10);
2244 }
2245
2246 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2247 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2248 data);
2249
2250 return 0;
2251 }
2252
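/* Queue a command disabling extended advertising for one instance, or for
 * all advertising sets when instance is 0x00.
 */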
2253 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2254 {
2255 struct hci_dev *hdev = req->hdev;
2256 struct hci_cp_le_set_ext_adv_enable *cp;
2257 struct hci_cp_ext_adv_set *adv_set;
2258 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2259 u8 req_size;
2260
2261 /* If request specifies an instance that doesn't exist, fail */
2262 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2263 return -EINVAL;
2264
2265 memset(data, 0, sizeof(data));
2266
2267 cp = (void *)data;
2268 adv_set = (void *)cp->data;
2269
2270 /* Instance 0x00 indicates all advertising instances will be disabled */
2271 cp->num_of_sets = !!instance;
2272 cp->enable = 0x00;
2273
2274 adv_set->handle = instance;
2275
2276 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2277 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2278
2279 return 0;
2280 }
2281
2282 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2283 {
2284 struct hci_dev *hdev = req->hdev;
2285
2286 /* If request specifies an instance that doesn't exist, fail */
2287 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2288 return -EINVAL;
2289
2290 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2291
2292 return 0;
2293 }
2294
2295 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2296 {
2297 struct hci_dev *hdev = req->hdev;
2298 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2299 int err;
2300
2301 /* If instance isn't pending, the chip knows about it, and it's safe to
2302 * disable
2303 */
2304 if (adv_instance && !adv_instance->pending)
2305 __hci_req_disable_ext_adv_instance(req, instance);
2306
2307 err = __hci_req_setup_ext_adv_instance(req, instance);
2308 if (err < 0)
2309 return err;
2310
2311 __hci_req_update_scan_rsp_data(req, instance);
2312 __hci_req_enable_ext_advertising(req, instance);
2313
2314 return 0;
2315 }
2316
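/* Make the given instance the current advertising instance and queue the
 * commands needed to (re)start it, honouring the instance duration and
 * remaining lifetime. For legacy advertising the expiry is driven by
 * delayed work; extended advertising relies on the controller's own
 * scheduling.
 */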
2317 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2318 bool force)
2319 {
2320 struct hci_dev *hdev = req->hdev;
2321 struct adv_info *adv_instance = NULL;
2322 u16 timeout;
2323
2324 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2325 list_empty(&hdev->adv_instances))
2326 return -EPERM;
2327
2328 if (hdev->adv_instance_timeout)
2329 return -EBUSY;
2330
2331 adv_instance = hci_find_adv_instance(hdev, instance);
2332 if (!adv_instance)
2333 return -ENOENT;
2334
2335 /* A zero timeout means unlimited advertising. As long as there is
2336 * only one instance, duration should be ignored. We still set a timeout
2337 * in case further instances are being added later on.
2338 *
2339 * If the remaining lifetime of the instance is more than the duration
2340 * then the timeout corresponds to the duration, otherwise it will be
2341 * reduced to the remaining instance lifetime.
2342 */
2343 if (adv_instance->timeout == 0 ||
2344 adv_instance->duration <= adv_instance->remaining_time)
2345 timeout = adv_instance->duration;
2346 else
2347 timeout = adv_instance->remaining_time;
2348
2349 /* The remaining time is being reduced unless the instance is being
2350 * advertised without time limit.
2351 */
2352 if (adv_instance->timeout)
2353 adv_instance->remaining_time =
2354 adv_instance->remaining_time - timeout;
2355
2356 /* Only use work for scheduling instances with legacy advertising */
2357 if (!ext_adv_capable(hdev)) {
2358 hdev->adv_instance_timeout = timeout;
2359 queue_delayed_work(hdev->req_workqueue,
2360 &hdev->adv_instance_expire,
2361 msecs_to_jiffies(timeout * 1000));
2362 }
2363
2364 /* If we're just re-scheduling the same instance again then do not
2365 * execute any HCI commands. This happens when a single instance is
2366 * being advertised.
2367 */
2368 if (!force && hdev->cur_adv_instance == instance &&
2369 hci_dev_test_flag(hdev, HCI_LE_ADV))
2370 return 0;
2371
2372 hdev->cur_adv_instance = instance;
2373 if (ext_adv_capable(hdev)) {
2374 __hci_req_start_ext_adv(req, instance);
2375 } else {
2376 __hci_req_update_adv_data(req, instance);
2377 __hci_req_update_scan_rsp_data(req, instance);
2378 __hci_req_enable_advertising(req);
2379 }
2380
2381 return 0;
2382 }
2383
2384 /* For a single instance:
2385 * - force == true: The instance will be removed even when its remaining
2386 * lifetime is not zero.
2387 * - force == false: the instance will be deactivated but kept stored unless
2388 * the remaining lifetime is zero.
2389 *
2390 * For instance == 0x00:
2391 * - force == true: All instances will be removed regardless of their timeout
2392 * setting.
2393 * - force == false: Only instances that have a timeout will be removed.
2394 */
2395 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2396 struct hci_request *req, u8 instance,
2397 bool force)
2398 {
2399 struct adv_info *adv_instance, *n, *next_instance = NULL;
2400 int err;
2401 u8 rem_inst;
2402
2403 /* Cancel any timeout concerning the removed instance(s). */
2404 if (!instance || hdev->cur_adv_instance == instance)
2405 cancel_adv_timeout(hdev);
2406
2407 /* Get the next instance to advertise BEFORE we remove
2408 * the current one. This can be the same instance again
2409 * if there is only one instance.
2410 */
2411 if (instance && hdev->cur_adv_instance == instance)
2412 next_instance = hci_get_next_instance(hdev, instance);
2413
2414 if (instance == 0x00) {
2415 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2416 list) {
2417 if (!(force || adv_instance->timeout))
2418 continue;
2419
2420 rem_inst = adv_instance->instance;
2421 err = hci_remove_adv_instance(hdev, rem_inst);
2422 if (!err)
2423 mgmt_advertising_removed(sk, hdev, rem_inst);
2424 }
2425 } else {
2426 adv_instance = hci_find_adv_instance(hdev, instance);
2427
2428 if (force || (adv_instance && adv_instance->timeout &&
2429 !adv_instance->remaining_time)) {
2430 /* Don't advertise a removed instance. */
2431 if (next_instance &&
2432 next_instance->instance == instance)
2433 next_instance = NULL;
2434
2435 err = hci_remove_adv_instance(hdev, instance);
2436 if (!err)
2437 mgmt_advertising_removed(sk, hdev, instance);
2438 }
2439 }
2440
2441 if (!req || !hdev_is_powered(hdev) ||
2442 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2443 return;
2444
2445 if (next_instance && !ext_adv_capable(hdev))
2446 __hci_req_schedule_adv_instance(req, next_instance->instance,
2447 false);
2448 }
2449
2450 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2451 {
2452 struct hci_dev *hdev = req->hdev;
2453
2454 /* If we're advertising or initiating an LE connection we can't
2455 * go ahead and change the random address at this time. This is
2456 * because the eventual initiator address used for the
2457 * subsequently created connection will be undefined (some
2458 * controllers use the new address and others the one we had
2459 * when the operation started).
2460 *
2461 * In this kind of scenario skip the update and let the random
2462 * address be updated at the next cycle.
2463 */
2464 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2465 hci_lookup_le_connect(hdev)) {
2466 BT_DBG("Deferring random address update");
2467 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2468 return;
2469 }
2470
2471 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2472 }
2473
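/* Select the own address type for the next LE operation and, if a random
 * address (RPA, non-resolvable or static) has to be used, queue the
 * command that programs it in the controller.
 */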
2474 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2475 bool use_rpa, u8 *own_addr_type)
2476 {
2477 struct hci_dev *hdev = req->hdev;
2478 int err;
2479
2480 /* If privacy is enabled use a resolvable private address. If
2481 * the current RPA has expired or something other than the
2482 * current RPA is in use, then generate a new one.
2483 */
2484 if (use_rpa) {
2485 int to;
2486
2487 /* If the controller supports LL Privacy, use own address type
2488 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2489 */
2490 if (use_ll_privacy(hdev))
2491 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2492 else
2493 *own_addr_type = ADDR_LE_DEV_RANDOM;
2494
2495 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2496 !bacmp(&hdev->random_addr, &hdev->rpa))
2497 return 0;
2498
2499 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2500 if (err < 0) {
2501 bt_dev_err(hdev, "failed to generate new RPA");
2502 return err;
2503 }
2504
2505 set_random_addr(req, &hdev->rpa);
2506
2507 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2508 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2509
2510 return 0;
2511 }
2512
2513 /* In case of required privacy without resolvable private address,
2514 * use a non-resolvable private address. This is useful for active
2515 * scanning and non-connectable advertising.
2516 */
2517 if (require_privacy) {
2518 bdaddr_t nrpa;
2519
2520 while (true) {
2521 /* The non-resolvable private address is generated
2522 * from six random bytes with the two most significant
2523 * bits cleared.
2524 */
2525 get_random_bytes(&nrpa, 6);
2526 nrpa.b[5] &= 0x3f;
2527
2528 /* The non-resolvable private address shall not be
2529 * equal to the public address.
2530 */
2531 if (bacmp(&hdev->bdaddr, &nrpa))
2532 break;
2533 }
2534
2535 *own_addr_type = ADDR_LE_DEV_RANDOM;
2536 set_random_addr(req, &nrpa);
2537 return 0;
2538 }
2539
2540 /* If forcing static address is in use or there is no public
2541 * address, use the static address as the random address (but skip
2542 * the HCI command if the current random address is already the
2543 * static one).
2544 *
2545 * In case BR/EDR has been disabled on a dual-mode controller
2546 * and a static address has been configured, then use that
2547 * address instead of the public BR/EDR address.
2548 */
2549 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2550 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2551 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2552 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2553 *own_addr_type = ADDR_LE_DEV_RANDOM;
2554 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2555 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2556 &hdev->static_addr);
2557 return 0;
2558 }
2559
2560 /* Neither privacy nor static address is being used so use a
2561 * public address.
2562 */
2563 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2564
2565 return 0;
2566 }
2567
2568 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2569 {
2570 struct bdaddr_list *b;
2571
2572 list_for_each_entry(b, &hdev->accept_list, list) {
2573 struct hci_conn *conn;
2574
2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2576 if (!conn)
2577 return true;
2578
2579 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2580 return true;
2581 }
2582
2583 return false;
2584 }
2585
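/* Queue a Write Scan Enable command reflecting the current connectable
 * and discoverable settings, unless BR/EDR is disabled, the device is
 * not powered (or powering down), scanning is paused or the scan state
 * already matches.
 */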
2586 void __hci_req_update_scan(struct hci_request *req)
2587 {
2588 struct hci_dev *hdev = req->hdev;
2589 u8 scan;
2590
2591 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2592 return;
2593
2594 if (!hdev_is_powered(hdev))
2595 return;
2596
2597 if (mgmt_powering_down(hdev))
2598 return;
2599
2600 if (hdev->scanning_paused)
2601 return;
2602
2603 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2604 disconnected_accept_list_entries(hdev))
2605 scan = SCAN_PAGE;
2606 else
2607 scan = SCAN_DISABLED;
2608
2609 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2610 scan |= SCAN_INQUIRY;
2611
2612 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2613 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2614 return;
2615
2616 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2617 }
2618
2619 static int update_scan(struct hci_request *req, unsigned long opt)
2620 {
2621 hci_dev_lock(req->hdev);
2622 __hci_req_update_scan(req);
2623 hci_dev_unlock(req->hdev);
2624 return 0;
2625 }
2626
2627 static void scan_update_work(struct work_struct *work)
2628 {
2629 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2630
2631 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2632 }
2633
2634 static int connectable_update(struct hci_request *req, unsigned long opt)
2635 {
2636 struct hci_dev *hdev = req->hdev;
2637
2638 hci_dev_lock(hdev);
2639
2640 __hci_req_update_scan(req);
2641
2642 /* If BR/EDR is not enabled and we disable advertising as a
2643 * by-product of disabling connectable, we need to update the
2644 * advertising flags.
2645 */
2646 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2647 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2648
2649 /* Update the advertising parameters if necessary */
2650 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2651 !list_empty(&hdev->adv_instances)) {
2652 if (ext_adv_capable(hdev))
2653 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2654 else
2655 __hci_req_enable_advertising(req);
2656 }
2657
2658 __hci_update_background_scan(req);
2659
2660 hci_dev_unlock(hdev);
2661
2662 return 0;
2663 }
2664
2665 static void connectable_update_work(struct work_struct *work)
2666 {
2667 struct hci_dev *hdev = container_of(work, struct hci_dev,
2668 connectable_update);
2669 u8 status;
2670
2671 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2672 mgmt_set_connectable_complete(hdev, status);
2673 }
2674
2675 static u8 get_service_classes(struct hci_dev *hdev)
2676 {
2677 struct bt_uuid *uuid;
2678 u8 val = 0;
2679
2680 list_for_each_entry(uuid, &hdev->uuids, list)
2681 val |= uuid->svc_hint;
2682
2683 return val;
2684 }
2685
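/* Queue a Write Class of Device command built from the major/minor class
 * and the registered service classes, setting the limited discoverable
 * bit when needed. Skipped while the service cache is in use or when the
 * class is unchanged.
 */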
2686 void __hci_req_update_class(struct hci_request *req)
2687 {
2688 struct hci_dev *hdev = req->hdev;
2689 u8 cod[3];
2690
2691 BT_DBG("%s", hdev->name);
2692
2693 if (!hdev_is_powered(hdev))
2694 return;
2695
2696 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2697 return;
2698
2699 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2700 return;
2701
2702 cod[0] = hdev->minor_class;
2703 cod[1] = hdev->major_class;
2704 cod[2] = get_service_classes(hdev);
2705
2706 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2707 cod[1] |= 0x20;
2708
2709 if (memcmp(cod, hdev->dev_class, 3) == 0)
2710 return;
2711
2712 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2713 }
2714
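/* Queue a Write Current IAC LAP command advertising the limited and/or
 * general inquiry access codes, depending on the discoverable mode.
 */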
2715 static void write_iac(struct hci_request *req)
2716 {
2717 struct hci_dev *hdev = req->hdev;
2718 struct hci_cp_write_current_iac_lap cp;
2719
2720 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2721 return;
2722
2723 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2724 /* Limited discoverable mode */
2725 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2726 cp.iac_lap[0] = 0x00; /* LIAC */
2727 cp.iac_lap[1] = 0x8b;
2728 cp.iac_lap[2] = 0x9e;
2729 cp.iac_lap[3] = 0x33; /* GIAC */
2730 cp.iac_lap[4] = 0x8b;
2731 cp.iac_lap[5] = 0x9e;
2732 } else {
2733 /* General discoverable mode */
2734 cp.num_iac = 1;
2735 cp.iac_lap[0] = 0x33; /* GIAC */
2736 cp.iac_lap[1] = 0x8b;
2737 cp.iac_lap[2] = 0x9e;
2738 }
2739
2740 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2741 (cp.num_iac * 3) + 1, &cp);
2742 }
2743
2744 static int discoverable_update(struct hci_request *req, unsigned long opt)
2745 {
2746 struct hci_dev *hdev = req->hdev;
2747
2748 hci_dev_lock(hdev);
2749
2750 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2751 write_iac(req);
2752 __hci_req_update_scan(req);
2753 __hci_req_update_class(req);
2754 }
2755
2756 /* Advertising instances don't use the global discoverable setting, so
2757 * only update AD if advertising was enabled using Set Advertising.
2758 */
2759 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2760 __hci_req_update_adv_data(req, 0x00);
2761
2762 /* Discoverable mode affects the local advertising
2763 * address in limited privacy mode.
2764 */
2765 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2766 if (ext_adv_capable(hdev))
2767 __hci_req_start_ext_adv(req, 0x00);
2768 else
2769 __hci_req_enable_advertising(req);
2770 }
2771 }
2772
2773 hci_dev_unlock(hdev);
2774
2775 return 0;
2776 }
2777
2778 static void discoverable_update_work(struct work_struct *work)
2779 {
2780 struct hci_dev *hdev = container_of(work, struct hci_dev,
2781 discoverable_update);
2782 u8 status;
2783
2784 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2785 mgmt_set_discoverable_complete(hdev, status);
2786 }
2787
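/* Queue the command appropriate for tearing down the given connection
 * based on its state and link type: disconnect, cancel the connection
 * attempt or reject an incoming connection request.
 */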
2788 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2789 u8 reason)
2790 {
2791 switch (conn->state) {
2792 case BT_CONNECTED:
2793 case BT_CONFIG:
2794 if (conn->type == AMP_LINK) {
2795 struct hci_cp_disconn_phy_link cp;
2796
2797 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2798 cp.reason = reason;
2799 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2800 &cp);
2801 } else {
2802 struct hci_cp_disconnect dc;
2803
2804 dc.handle = cpu_to_le16(conn->handle);
2805 dc.reason = reason;
2806 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2807 }
2808
2809 conn->state = BT_DISCONN;
2810
2811 break;
2812 case BT_CONNECT:
2813 if (conn->type == LE_LINK) {
2814 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2815 break;
2816 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2817 0, NULL);
2818 } else if (conn->type == ACL_LINK) {
2819 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2820 break;
2821 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2822 6, &conn->dst);
2823 }
2824 break;
2825 case BT_CONNECT2:
2826 if (conn->type == ACL_LINK) {
2827 struct hci_cp_reject_conn_req rej;
2828
2829 bacpy(&rej.bdaddr, &conn->dst);
2830 rej.reason = reason;
2831
2832 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2833 sizeof(rej), &rej);
2834 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2835 struct hci_cp_reject_sync_conn_req rej;
2836
2837 bacpy(&rej.bdaddr, &conn->dst);
2838
2839 /* SCO rejection has its own limited set of
2840 * allowed error values (0x0D-0x0F) which isn't
2841 * compatible with most values passed to this
2842 * function. To be safe hard-code one of the
2843 * values that's suitable for SCO.
2844 */
2845 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2846
2847 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2848 sizeof(rej), &rej);
2849 }
2850 break;
2851 default:
2852 conn->state = BT_CLOSED;
2853 break;
2854 }
2855 }
2856
2857 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2858 {
2859 if (status)
2860 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2861 }
2862
2863 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2864 {
2865 struct hci_request req;
2866 int err;
2867
2868 hci_req_init(&req, conn->hdev);
2869
2870 __hci_abort_conn(&req, conn, reason);
2871
2872 err = hci_req_run(&req, abort_conn_complete);
2873 if (err && err != -ENODATA) {
2874 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2875 return err;
2876 }
2877
2878 return 0;
2879 }
2880
2881 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2882 {
2883 hci_dev_lock(req->hdev);
2884 __hci_update_background_scan(req);
2885 hci_dev_unlock(req->hdev);
2886 return 0;
2887 }
2888
2889 static void bg_scan_update(struct work_struct *work)
2890 {
2891 struct hci_dev *hdev = container_of(work, struct hci_dev,
2892 bg_scan_update);
2893 struct hci_conn *conn;
2894 u8 status;
2895 int err;
2896
2897 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2898 if (!err)
2899 return;
2900
2901 hci_dev_lock(hdev);
2902
2903 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2904 if (conn)
2905 hci_le_conn_failed(conn, status);
2906
2907 hci_dev_unlock(hdev);
2908 }
2909
2910 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2911 {
2912 hci_req_add_le_scan_disable(req, false);
2913 return 0;
2914 }
2915
2916 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2917 {
2918 u8 length = opt;
2919 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2920 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2921 struct hci_cp_inquiry cp;
2922
2923 BT_DBG("%s", req->hdev->name);
2924
2925 hci_dev_lock(req->hdev);
2926 hci_inquiry_cache_flush(req->hdev);
2927 hci_dev_unlock(req->hdev);
2928
2929 memset(&cp, 0, sizeof(cp));
2930
2931 if (req->hdev->discovery.limited)
2932 memcpy(&cp.lap, liac, sizeof(cp.lap));
2933 else
2934 memcpy(&cp.lap, giac, sizeof(cp.lap));
2935
2936 cp.length = length;
2937
2938 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2939
2940 return 0;
2941 }
2942
2943 static void le_scan_disable_work(struct work_struct *work)
2944 {
2945 struct hci_dev *hdev = container_of(work, struct hci_dev,
2946 le_scan_disable.work);
2947 u8 status;
2948
2949 BT_DBG("%s", hdev->name);
2950
2951 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2952 return;
2953
2954 cancel_delayed_work(&hdev->le_scan_restart);
2955
2956 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2957 if (status) {
2958 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2959 status);
2960 return;
2961 }
2962
2963 hdev->discovery.scan_start = 0;
2964
2965 /* If we were running LE only scan, change discovery state. If
2966 * we were running both LE and BR/EDR inquiry simultaneously,
2967 * and BR/EDR inquiry is already finished, stop discovery,
2968 * otherwise BR/EDR inquiry will stop discovery when finished.
2969 * If we are going to resolve a remote device name, do not change
2970 * discovery state.
2971 */
2972
2973 if (hdev->discovery.type == DISCOV_TYPE_LE)
2974 goto discov_stopped;
2975
2976 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2977 return;
2978
2979 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2980 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2981 hdev->discovery.state != DISCOVERY_RESOLVING)
2982 goto discov_stopped;
2983
2984 return;
2985 }
2986
2987 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2988 HCI_CMD_TIMEOUT, &status);
2989 if (status) {
2990 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2991 goto discov_stopped;
2992 }
2993
2994 return;
2995
2996 discov_stopped:
2997 hci_dev_lock(hdev);
2998 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2999 hci_dev_unlock(hdev);
3000 }
3001
3002 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3003 {
3004 struct hci_dev *hdev = req->hdev;
3005
3006 /* If controller is not scanning we are done. */
3007 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3008 return 0;
3009
3010 if (hdev->scanning_paused) {
3011 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3012 return 0;
3013 }
3014
3015 hci_req_add_le_scan_disable(req, false);
3016
3017 if (use_ext_scan(hdev)) {
3018 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3019
3020 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3021 ext_enable_cp.enable = LE_SCAN_ENABLE;
3022 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3023
3024 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3025 sizeof(ext_enable_cp), &ext_enable_cp);
3026 } else {
3027 struct hci_cp_le_set_scan_enable cp;
3028
3029 memset(&cp, 0, sizeof(cp));
3030 cp.enable = LE_SCAN_ENABLE;
3031 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3032 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3033 }
3034
3035 return 0;
3036 }
3037
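/* Work item restarting LE scanning and, for controllers with the strict
 * duplicate filter quirk, re-queueing the scan disable work with whatever
 * is left of the original scan duration.
 */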
3038 static void le_scan_restart_work(struct work_struct *work)
3039 {
3040 struct hci_dev *hdev = container_of(work, struct hci_dev,
3041 le_scan_restart.work);
3042 unsigned long timeout, duration, scan_start, now;
3043 u8 status;
3044
3045 BT_DBG("%s", hdev->name);
3046
3047 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3048 if (status) {
3049 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3050 status);
3051 return;
3052 }
3053
3054 hci_dev_lock(hdev);
3055
3056 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3057 !hdev->discovery.scan_start)
3058 goto unlock;
3059
3060 /* When the scan was started, hdev->le_scan_disable has been queued
3061 * after duration from scan_start. During scan restart this job
3062 * has been canceled, and we need to queue it again after proper
3063 * timeout, to make sure that scan does not run indefinitely.
3064 */
3065 duration = hdev->discovery.scan_duration;
3066 scan_start = hdev->discovery.scan_start;
3067 now = jiffies;
3068 if (now - scan_start <= duration) {
3069 int elapsed;
3070
3071 if (now >= scan_start)
3072 elapsed = now - scan_start;
3073 else
3074 elapsed = ULONG_MAX - scan_start + now;
3075
3076 timeout = duration - elapsed;
3077 } else {
3078 timeout = 0;
3079 }
3080
3081 queue_delayed_work(hdev->req_workqueue,
3082 &hdev->le_scan_disable, timeout);
3083
3084 unlock:
3085 hci_dev_unlock(hdev);
3086 }
3087
3088 static int active_scan(struct hci_request *req, unsigned long opt)
3089 {
3090 uint16_t interval = opt;
3091 struct hci_dev *hdev = req->hdev;
3092 u8 own_addr_type;
3093 /* Accept list is not used for discovery */
3094 u8 filter_policy = 0x00;
3095 /* Discovery doesn't require controller address resolution */
3096 bool addr_resolv = false;
3097 int err;
3098
3099 BT_DBG("%s", hdev->name);
3100
3101 /* If controller is scanning, it means the background scanning is
3102 * running. Thus, we should temporarily stop it in order to set the
3103 * discovery scanning parameters.
3104 */
3105 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3106 hci_req_add_le_scan_disable(req, false);
3107
3108 /* All active scans will be done with either a resolvable private
3109 * address (when privacy feature has been enabled) or non-resolvable
3110 * private address.
3111 */
3112 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3113 &own_addr_type);
3114 if (err < 0)
3115 own_addr_type = ADDR_LE_DEV_PUBLIC;
3116
3117 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3118 hdev->le_scan_window_discovery, own_addr_type,
3119 filter_policy, addr_resolv);
3120 return 0;
3121 }
3122
3123 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3124 {
3125 int err;
3126
3127 BT_DBG("%s", req->hdev->name);
3128
3129 err = active_scan(req, opt);
3130 if (err)
3131 return err;
3132
3133 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3134 }
3135
3136 static void start_discovery(struct hci_dev *hdev, u8 *status)
3137 {
3138 unsigned long timeout;
3139
3140 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3141
3142 switch (hdev->discovery.type) {
3143 case DISCOV_TYPE_BREDR:
3144 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3145 hci_req_sync(hdev, bredr_inquiry,
3146 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3147 status);
3148 return;
3149 case DISCOV_TYPE_INTERLEAVED:
3150 /* When running simultaneous discovery, the LE scanning time
3151 * should occupy the whole discovery time since BR/EDR inquiry
3152 * and LE scanning are scheduled by the controller.
3153 *
3154 * For interleaving discovery in comparison, BR/EDR inquiry
3155 * and LE scanning are done sequentially with separate
3156 * timeouts.
3157 */
3158 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3159 &hdev->quirks)) {
3160 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3161 /* During simultaneous discovery, we double LE scan
3162 * interval. We must leave some time for the controller
3163 * to do BR/EDR inquiry.
3164 */
3165 hci_req_sync(hdev, interleaved_discov,
3166 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3167 status);
3168 break;
3169 }
3170
3171 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3172 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3173 HCI_CMD_TIMEOUT, status);
3174 break;
3175 case DISCOV_TYPE_LE:
3176 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3177 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3178 HCI_CMD_TIMEOUT, status);
3179 break;
3180 default:
3181 *status = HCI_ERROR_UNSPECIFIED;
3182 return;
3183 }
3184
3185 if (*status)
3186 return;
3187
3188 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3189
3190 /* When service discovery is used and the controller has a
3191 * strict duplicate filter, it is important to remember the
3192 * start and duration of the scan. This is required for
3193 * restarting scanning during the discovery phase.
3194 */
3195 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3196 hdev->discovery.result_filtering) {
3197 hdev->discovery.scan_start = jiffies;
3198 hdev->discovery.scan_duration = timeout;
3199 }
3200
3201 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3202 timeout);
3203 }
3204
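/* Queue the commands needed to stop the current discovery: cancel the
 * BR/EDR inquiry, disable LE scanning and cancel a pending remote name
 * request as applicable. Returns true if any command was queued.
 */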
3205 bool hci_req_stop_discovery(struct hci_request *req)
3206 {
3207 struct hci_dev *hdev = req->hdev;
3208 struct discovery_state *d = &hdev->discovery;
3209 struct hci_cp_remote_name_req_cancel cp;
3210 struct inquiry_entry *e;
3211 bool ret = false;
3212
3213 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3214
3215 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3216 if (test_bit(HCI_INQUIRY, &hdev->flags))
3217 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3218
3219 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3220 cancel_delayed_work(&hdev->le_scan_disable);
3221 hci_req_add_le_scan_disable(req, false);
3222 }
3223
3224 ret = true;
3225 } else {
3226 /* Passive scanning */
3227 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3228 hci_req_add_le_scan_disable(req, false);
3229 ret = true;
3230 }
3231 }
3232
3233 /* No further actions needed for LE-only discovery */
3234 if (d->type == DISCOV_TYPE_LE)
3235 return ret;
3236
3237 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3238 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3239 NAME_PENDING);
3240 if (!e)
3241 return ret;
3242
3243 bacpy(&cp.bdaddr, &e->data.bdaddr);
3244 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3245 &cp);
3246 ret = true;
3247 }
3248
3249 return ret;
3250 }
3251
3252 static int stop_discovery(struct hci_request *req, unsigned long opt)
3253 {
3254 hci_dev_lock(req->hdev);
3255 hci_req_stop_discovery(req);
3256 hci_dev_unlock(req->hdev);
3257
3258 return 0;
3259 }
3260
3261 static void discov_update(struct work_struct *work)
3262 {
3263 struct hci_dev *hdev = container_of(work, struct hci_dev,
3264 discov_update);
3265 u8 status = 0;
3266
3267 switch (hdev->discovery.state) {
3268 case DISCOVERY_STARTING:
3269 start_discovery(hdev, &status);
3270 mgmt_start_discovery_complete(hdev, status);
3271 if (status)
3272 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3273 else
3274 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3275 break;
3276 case DISCOVERY_STOPPING:
3277 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3278 mgmt_stop_discovery_complete(hdev, status);
3279 if (!status)
3280 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3281 break;
3282 case DISCOVERY_STOPPED:
3283 default:
3284 return;
3285 }
3286 }
3287
3288 static void discov_off(struct work_struct *work)
3289 {
3290 struct hci_dev *hdev = container_of(work, struct hci_dev,
3291 discov_off.work);
3292
3293 BT_DBG("%s", hdev->name);
3294
3295 hci_dev_lock(hdev);
3296
3297 /* When discoverable timeout triggers, then just make sure
3298 * the limited discoverable flag is cleared. Even in the case
3299 * of a timeout triggered from general discoverable, it is
3300 * safe to unconditionally clear the flag.
3301 */
3302 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3303 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3304 hdev->discov_timeout = 0;
3305
3306 hci_dev_unlock(hdev);
3307
3308 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3309 mgmt_new_settings(hdev);
3310 }
3311
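/* Bring the controller into a consistent state after powering on: enable
 * SSP and Secure Connections where configured, set LE host support,
 * program default advertising data or reschedule existing instances, and
 * sync the authentication, fast connectable, scan, class, name and EIR
 * settings.
 */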
3312 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3313 {
3314 struct hci_dev *hdev = req->hdev;
3315 u8 link_sec;
3316
3317 hci_dev_lock(hdev);
3318
3319 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3320 !lmp_host_ssp_capable(hdev)) {
3321 u8 mode = 0x01;
3322
3323 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3324
3325 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3326 u8 support = 0x01;
3327
3328 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3329 sizeof(support), &support);
3330 }
3331 }
3332
3333 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3334 lmp_bredr_capable(hdev)) {
3335 struct hci_cp_write_le_host_supported cp;
3336
3337 cp.le = 0x01;
3338 cp.simul = 0x00;
3339
3340 /* Check first if we already have the right
3341 * host state (host features set)
3342 */
3343 if (cp.le != lmp_host_le_capable(hdev) ||
3344 cp.simul != lmp_host_le_br_capable(hdev))
3345 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3346 sizeof(cp), &cp);
3347 }
3348
3349 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3350 /* Make sure the controller has a good default for
3351 * advertising data. This also applies to the case
3352 * where BR/EDR was toggled during the AUTO_OFF phase.
3353 */
3354 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3355 list_empty(&hdev->adv_instances)) {
3356 int err;
3357
3358 if (ext_adv_capable(hdev)) {
3359 err = __hci_req_setup_ext_adv_instance(req,
3360 0x00);
3361 if (!err)
3362 __hci_req_update_scan_rsp_data(req,
3363 0x00);
3364 } else {
3365 err = 0;
3366 __hci_req_update_adv_data(req, 0x00);
3367 __hci_req_update_scan_rsp_data(req, 0x00);
3368 }
3369
3370 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3371 if (!ext_adv_capable(hdev))
3372 __hci_req_enable_advertising(req);
3373 else if (!err)
3374 __hci_req_enable_ext_advertising(req,
3375 0x00);
3376 }
3377 } else if (!list_empty(&hdev->adv_instances)) {
3378 struct adv_info *adv_instance;
3379
3380 adv_instance = list_first_entry(&hdev->adv_instances,
3381 struct adv_info, list);
3382 __hci_req_schedule_adv_instance(req,
3383 adv_instance->instance,
3384 true);
3385 }
3386 }
3387
3388 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3389 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3390 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3391 sizeof(link_sec), &link_sec);
3392
3393 if (lmp_bredr_capable(hdev)) {
3394 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3395 __hci_req_write_fast_connectable(req, true);
3396 else
3397 __hci_req_write_fast_connectable(req, false);
3398 __hci_req_update_scan(req);
3399 __hci_req_update_class(req);
3400 __hci_req_update_name(req);
3401 __hci_req_update_eir(req);
3402 }
3403
3404 hci_dev_unlock(hdev);
3405 return 0;
3406 }
3407
3408 int __hci_req_hci_power_on(struct hci_dev *hdev)
3409 {
3410 /* Register the available SMP channels (BR/EDR and LE) only when
3411 * successfully powering on the controller. This late
3412 * registration is required so that LE SMP can clearly decide if
3413 * the public address or static address is used.
3414 */
3415 smp_register(hdev);
3416
3417 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3418 NULL);
3419 }
3420
3421 void hci_request_setup(struct hci_dev *hdev)
3422 {
3423 INIT_WORK(&hdev->discov_update, discov_update);
3424 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3425 INIT_WORK(&hdev->scan_update, scan_update_work);
3426 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3427 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3428 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3429 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3430 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3431 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3432 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3433 }
3434
3435 void hci_request_cancel_all(struct hci_dev *hdev)
3436 {
3437 hci_req_sync_cancel(hdev, ENODEV);
3438
3439 cancel_work_sync(&hdev->discov_update);
3440 cancel_work_sync(&hdev->bg_scan_update);
3441 cancel_work_sync(&hdev->scan_update);
3442 cancel_work_sync(&hdev->connectable_update);
3443 cancel_work_sync(&hdev->discoverable_update);
3444 cancel_delayed_work_sync(&hdev->discov_off);
3445 cancel_delayed_work_sync(&hdev->le_scan_disable);
3446 cancel_delayed_work_sync(&hdev->le_scan_restart);
3447
3448 if (hdev->adv_instance_timeout) {
3449 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3450 hdev->adv_instance_timeout = 0;
3451 }
3452
3453 cancel_interleave_scan(hdev);
3454 }
3455