1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
36
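/* Helpers for building and running batched HCI command requests.
 *
 * Typical usage, mirroring hci_req_update_adv_data() below (the opcode and
 * callback names here are placeholders):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_..., sizeof(cp), &cp);
 *	hci_req_run(&req, complete_cb);
 */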
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
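/* Move all commands queued on @req onto the controller's command queue and
 * schedule the command work. Any completion callback is attached to the last
 * command. Returns -ENODATA for an empty request or the recorded build error.
 */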
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
101
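/* Completion handler used by the synchronous helpers below: it records the
 * result (and optionally the response skb) in hdev and wakes up the waiter.
 */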
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114 }
115
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125 }
126
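/* Send a single HCI command and wait for its completion (or for @event, if
 * given). Returns the response skb on success or an ERR_PTR() on failure.
 */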
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129 {
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186 {
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266 }
267
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 int ret;
273
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* Check the state after obtaining the lock to protect the HCI_UP flag
277 * against any races from hci_dev_do_close when the controller
278 * gets removed.
279 */
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 else
283 ret = -ENETDOWN;
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287 }
288
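/* Allocate an skb containing one HCI command packet: header followed by
 * @plen bytes of parameters. Returns NULL if allocation fails.
 */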
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291 {
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 skb_put_data(skb, param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313 }
314
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318 {
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341 bt_cb(skb)->hci.req_event = event;
342
343 skb_queue_tail(&req->cmd_q, skb);
344 }
345
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348 {
349 hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351
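/* Queue commands to switch page scan between the fast (interlaced scan,
 * 160 msec interval) and the default parameters, skipping any command whose
 * values already match the current settings.
 */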
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = hdev->def_page_scan_type;
371 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
372 }
373
374 acp.window = cpu_to_le16(hdev->def_page_scan_window);
375
376 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
377 __cpu_to_le16(hdev->page_scan_window) != acp.window)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
379 sizeof(acp), &acp);
380
381 if (hdev->page_scan_type != type)
382 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
383 }
384
385 /* This function controls the background scanning based on hdev->pend_le_conns
386 * list. If there are pending LE connections we start the background scanning,
387 * otherwise we stop it.
388 *
389 * This function requires the caller holds hdev->lock.
390 */
391 static void __hci_update_background_scan(struct hci_request *req)
392 {
393 struct hci_dev *hdev = req->hdev;
394
395 if (!test_bit(HCI_UP, &hdev->flags) ||
396 test_bit(HCI_INIT, &hdev->flags) ||
397 hci_dev_test_flag(hdev, HCI_SETUP) ||
398 hci_dev_test_flag(hdev, HCI_CONFIG) ||
399 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
400 hci_dev_test_flag(hdev, HCI_UNREGISTER))
401 return;
402
403 /* No point in doing scanning if LE support hasn't been enabled */
404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
405 return;
406
407 /* If discovery is active don't interfere with it */
408 if (hdev->discovery.state != DISCOVERY_STOPPED)
409 return;
410
411 /* Reset RSSI and UUID filters when starting background scanning
412 * since these filters are meant for service discovery only.
413 *
414 * The Start Discovery and Start Service Discovery operations
415 * ensure that proper values are set for the RSSI threshold and UUID
416 * filter list, so it is safe to just reset them here.
417 */
418 hci_discovery_filter_clear(hdev);
419
420 BT_DBG("%s ADV monitoring is %s", hdev->name,
421 hci_is_adv_monitoring(hdev) ? "on" : "off");
422
423 if (list_empty(&hdev->pend_le_conns) &&
424 list_empty(&hdev->pend_le_reports) &&
425 !hci_is_adv_monitoring(hdev)) {
426 /* If there are no pending LE connections, no devices
427 * to be scanned for and no ADV monitors, we should stop the
428 * background scanning.
429 */
430
431 /* If controller is not scanning we are done. */
432 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 return;
434
435 hci_req_add_le_scan_disable(req, false);
436
437 BT_DBG("%s stopping background scanning", hdev->name);
438 } else {
439 /* If there is at least one pending LE connection, we should
440 * keep the background scan running.
441 */
442
443 /* If controller is connecting, we should not start scanning
444 * since some controllers are not able to scan and connect at
445 * the same time.
446 */
447 if (hci_lookup_le_connect(hdev))
448 return;
449
450 /* If controller is currently scanning, we stop it to ensure we
451 * don't miss any advertising (due to duplicates filter).
452 */
453 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
454 hci_req_add_le_scan_disable(req, false);
455
456 hci_req_add_le_passive_scan(req);
457
458 BT_DBG("%s starting background scanning", hdev->name);
459 }
460 }
461
462 void __hci_req_update_name(struct hci_request *req)
463 {
464 struct hci_dev *hdev = req->hdev;
465 struct hci_cp_write_local_name cp;
466
467 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
468
469 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 }
471
472 #define PNP_INFO_SVCLASS_ID 0x1200
473
474 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 {
476 u8 *ptr = data, *uuids_start = NULL;
477 struct bt_uuid *uuid;
478
479 if (len < 4)
480 return ptr;
481
482 list_for_each_entry(uuid, &hdev->uuids, list) {
483 u16 uuid16;
484
485 if (uuid->size != 16)
486 continue;
487
488 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
489 if (uuid16 < 0x1100)
490 continue;
491
492 if (uuid16 == PNP_INFO_SVCLASS_ID)
493 continue;
494
495 if (!uuids_start) {
496 uuids_start = ptr;
497 uuids_start[0] = 1;
498 uuids_start[1] = EIR_UUID16_ALL;
499 ptr += 2;
500 }
501
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u16) > len) {
504 uuids_start[1] = EIR_UUID16_SOME;
505 break;
506 }
507
508 *ptr++ = (uuid16 & 0x00ff);
509 *ptr++ = (uuid16 & 0xff00) >> 8;
510 uuids_start[0] += sizeof(uuid16);
511 }
512
513 return ptr;
514 }
515
516 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
517 {
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
520
521 if (len < 6)
522 return ptr;
523
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 32)
526 continue;
527
528 if (!uuids_start) {
529 uuids_start = ptr;
530 uuids_start[0] = 1;
531 uuids_start[1] = EIR_UUID32_ALL;
532 ptr += 2;
533 }
534
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + sizeof(u32) > len) {
537 uuids_start[1] = EIR_UUID32_SOME;
538 break;
539 }
540
541 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
542 ptr += sizeof(u32);
543 uuids_start[0] += sizeof(u32);
544 }
545
546 return ptr;
547 }
548
549 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
550 {
551 u8 *ptr = data, *uuids_start = NULL;
552 struct bt_uuid *uuid;
553
554 if (len < 18)
555 return ptr;
556
557 list_for_each_entry(uuid, &hdev->uuids, list) {
558 if (uuid->size != 128)
559 continue;
560
561 if (!uuids_start) {
562 uuids_start = ptr;
563 uuids_start[0] = 1;
564 uuids_start[1] = EIR_UUID128_ALL;
565 ptr += 2;
566 }
567
568 /* Stop if not enough space to put next UUID */
569 if ((ptr - data) + 16 > len) {
570 uuids_start[1] = EIR_UUID128_SOME;
571 break;
572 }
573
574 memcpy(ptr, uuid->uuid, 16);
575 ptr += 16;
576 uuids_start[0] += 16;
577 }
578
579 return ptr;
580 }
581
582 static void create_eir(struct hci_dev *hdev, u8 *data)
583 {
584 u8 *ptr = data;
585 size_t name_len;
586
587 name_len = strlen(hdev->dev_name);
588
589 if (name_len > 0) {
590 /* EIR Data type */
591 if (name_len > 48) {
592 name_len = 48;
593 ptr[1] = EIR_NAME_SHORT;
594 } else
595 ptr[1] = EIR_NAME_COMPLETE;
596
597 /* EIR Data length */
598 ptr[0] = name_len + 1;
599
600 memcpy(ptr + 2, hdev->dev_name, name_len);
601
602 ptr += (name_len + 2);
603 }
604
605 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
606 ptr[0] = 2;
607 ptr[1] = EIR_TX_POWER;
608 ptr[2] = (u8) hdev->inq_tx_power;
609
610 ptr += 3;
611 }
612
613 if (hdev->devid_source > 0) {
614 ptr[0] = 9;
615 ptr[1] = EIR_DEVICE_ID;
616
617 put_unaligned_le16(hdev->devid_source, ptr + 2);
618 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
619 put_unaligned_le16(hdev->devid_product, ptr + 6);
620 put_unaligned_le16(hdev->devid_version, ptr + 8);
621
622 ptr += 10;
623 }
624
625 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
627 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628 }
629
630 void __hci_req_update_eir(struct hci_request *req)
631 {
632 struct hci_dev *hdev = req->hdev;
633 struct hci_cp_write_eir cp;
634
635 if (!hdev_is_powered(hdev))
636 return;
637
638 if (!lmp_ext_inq_capable(hdev))
639 return;
640
641 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 return;
643
644 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 return;
646
647 memset(&cp, 0, sizeof(cp));
648
649 create_eir(hdev, cp.data);
650
651 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 return;
653
654 memcpy(hdev->eir, cp.data, sizeof(cp.data));
655
656 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657 }
658
659 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
660 {
661 struct hci_dev *hdev = req->hdev;
662
663 if (hdev->scanning_paused) {
664 bt_dev_dbg(hdev, "Scanning is paused for suspend");
665 return;
666 }
667
668 if (use_ext_scan(hdev)) {
669 struct hci_cp_le_set_ext_scan_enable cp;
670
671 memset(&cp, 0, sizeof(cp));
672 cp.enable = LE_SCAN_DISABLE;
673 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
674 &cp);
675 } else {
676 struct hci_cp_le_set_scan_enable cp;
677
678 memset(&cp, 0, sizeof(cp));
679 cp.enable = LE_SCAN_DISABLE;
680 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
681 }
682
683 /* Disable address resolution */
684 if (use_ll_privacy(hdev) &&
685 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
686 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
687 __u8 enable = 0x00;
688
689 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
690 }
691 }
692
693 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
694 u8 bdaddr_type)
695 {
696 struct hci_cp_le_del_from_white_list cp;
697
698 cp.bdaddr_type = bdaddr_type;
699 bacpy(&cp.bdaddr, bdaddr);
700
701 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
702 cp.bdaddr_type);
703 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
704
705 if (use_ll_privacy(req->hdev) &&
706 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
707 struct smp_irk *irk;
708
709 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
710 if (irk) {
711 struct hci_cp_le_del_from_resolv_list cp;
712
713 cp.bdaddr_type = bdaddr_type;
714 bacpy(&cp.bdaddr, bdaddr);
715
716 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
717 sizeof(cp), &cp);
718 }
719 }
720 }
721
722 /* Adds connection to white list if needed. On error, returns -1. */
723 static int add_to_white_list(struct hci_request *req,
724 struct hci_conn_params *params, u8 *num_entries,
725 bool allow_rpa)
726 {
727 struct hci_cp_le_add_to_white_list cp;
728 struct hci_dev *hdev = req->hdev;
729
730 /* Already in white list */
731 if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
732 params->addr_type))
733 return 0;
734
735 /* Select filter policy to accept all advertising */
736 if (*num_entries >= hdev->le_white_list_size)
737 return -1;
738
739 /* White list cannot be used with RPAs */
740 if (!allow_rpa &&
741 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
742 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
743 return -1;
744 }
745
746 /* During suspend, only wakeable devices can be in whitelist */
747 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
748 params->current_flags))
749 return 0;
750
751 *num_entries += 1;
752 cp.bdaddr_type = params->addr_type;
753 bacpy(&cp.bdaddr, ¶ms->addr);
754
755 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
756 cp.bdaddr_type);
757 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
758
759 if (use_ll_privacy(hdev) &&
760 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
761 struct smp_irk *irk;
762
763 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
764 params->addr_type);
765 if (irk) {
766 struct hci_cp_le_add_to_resolv_list cp;
767
768 cp.bdaddr_type = params->addr_type;
769 bacpy(&cp.bdaddr, ¶ms->addr);
770 memcpy(cp.peer_irk, irk->val, 16);
771
772 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
773 memcpy(cp.local_irk, hdev->irk, 16);
774 else
775 memset(cp.local_irk, 0, 16);
776
777 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
778 sizeof(cp), &cp);
779 }
780 }
781
782 return 0;
783 }
784
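/* Rebuild the controller white list from the pending LE connection and
 * report lists. Returns the scan filter policy to use: 0x01 if the white
 * list can be used, 0x00 if scanning must accept all advertisements.
 */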
785 static u8 update_white_list(struct hci_request *req)
786 {
787 struct hci_dev *hdev = req->hdev;
788 struct hci_conn_params *params;
789 struct bdaddr_list *b;
790 u8 num_entries = 0;
791 bool pend_conn, pend_report;
792 /* We allow whitelisting even with RPAs in suspend. In the worst case,
793 * we won't be able to wake from devices that use the Privacy 1.2
794 * features. Additionally, once we support Privacy 1.2 and IRK
795 * offloading, we can update this to also check for those conditions.
796 */
797 bool allow_rpa = hdev->suspended;
798
799 /* Go through the current white list programmed into the
800 * controller one by one and check if that address is still
801 * in the list of pending connections or list of devices to
802 * report. If not present in either list, then queue the
803 * command to remove it from the controller.
804 */
805 list_for_each_entry(b, &hdev->le_white_list, list) {
806 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
807 &b->bdaddr,
808 b->bdaddr_type);
809 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
810 &b->bdaddr,
811 b->bdaddr_type);
812
813 /* If the device is not likely to connect or report,
814 * remove it from the whitelist.
815 */
816 if (!pend_conn && !pend_report) {
817 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
818 continue;
819 }
820
821 /* White list cannot be used with RPAs */
822 if (!allow_rpa &&
823 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
824 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
825 return 0x00;
826 }
827
828 num_entries++;
829 }
830
831 /* Since all no longer valid white list entries have been
832 * removed, walk through the list of pending connections
833 * and ensure that any new device gets programmed into
834 * the controller.
835 *
836 * If the list of devices is larger than the number of
837 * available white list entries in the controller, then
838 * just abort and return a filter policy value to not use the
839 * white list.
840 */
841 list_for_each_entry(params, &hdev->pend_le_conns, action) {
842 if (add_to_white_list(req, params, &num_entries, allow_rpa))
843 return 0x00;
844 }
845
846 /* After adding all new pending connections, walk through
847 * the list of pending reports and also add these to the
848 * white list if there is still space. Abort if space runs out.
849 */
850 list_for_each_entry(params, &hdev->pend_le_reports, action) {
851 if (add_to_white_list(req, params, &num_entries, allow_rpa))
852 return 0x00;
853 }
854
855 /* Once controller offloading of advertisement monitors is in place,
856 * the if condition should also check for MSFT extension
857 * support. If suspend is ongoing, the whitelist should be the default to
858 * prevent waking by random advertisements.
859 */
860 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
861 return 0x00;
862
863 /* Select filter policy to use white list */
864 return 0x01;
865 }
866
867 static bool scan_use_rpa(struct hci_dev *hdev)
868 {
869 return hci_dev_test_flag(hdev, HCI_PRIVACY);
870 }
871
872 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
873 u16 window, u8 own_addr_type, u8 filter_policy,
874 bool addr_resolv)
875 {
876 struct hci_dev *hdev = req->hdev;
877
878 if (hdev->scanning_paused) {
879 bt_dev_dbg(hdev, "Scanning is paused for suspend");
880 return;
881 }
882
883 if (use_ll_privacy(hdev) &&
884 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
885 addr_resolv) {
886 u8 enable = 0x01;
887
888 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
889 }
890
891 /* Use extended scanning if both the extended scan parameters and
892 * extended scan enable commands are supported
893 */
894 if (use_ext_scan(hdev)) {
895 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
896 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
897 struct hci_cp_le_scan_phy_params *phy_params;
898 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
899 u32 plen;
900
901 ext_param_cp = (void *)data;
902 phy_params = (void *)ext_param_cp->data;
903
904 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
905 ext_param_cp->own_addr_type = own_addr_type;
906 ext_param_cp->filter_policy = filter_policy;
907
908 plen = sizeof(*ext_param_cp);
909
910 if (scan_1m(hdev) || scan_2m(hdev)) {
911 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
912
913 memset(phy_params, 0, sizeof(*phy_params));
914 phy_params->type = type;
915 phy_params->interval = cpu_to_le16(interval);
916 phy_params->window = cpu_to_le16(window);
917
918 plen += sizeof(*phy_params);
919 phy_params++;
920 }
921
922 if (scan_coded(hdev)) {
923 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
924
925 memset(phy_params, 0, sizeof(*phy_params));
926 phy_params->type = type;
927 phy_params->interval = cpu_to_le16(interval);
928 phy_params->window = cpu_to_le16(window);
929
930 plen += sizeof(*phy_params);
931 phy_params++;
932 }
933
934 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
935 plen, ext_param_cp);
936
937 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
938 ext_enable_cp.enable = LE_SCAN_ENABLE;
939 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
940
941 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
942 sizeof(ext_enable_cp), &ext_enable_cp);
943 } else {
944 struct hci_cp_le_set_scan_param param_cp;
945 struct hci_cp_le_set_scan_enable enable_cp;
946
947 memset(¶m_cp, 0, sizeof(param_cp));
948 param_cp.type = type;
949 param_cp.interval = cpu_to_le16(interval);
950 param_cp.window = cpu_to_le16(window);
951 param_cp.own_address_type = own_addr_type;
952 param_cp.filter_policy = filter_policy;
953 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
954 ¶m_cp);
955
956 memset(&enable_cp, 0, sizeof(enable_cp));
957 enable_cp.enable = LE_SCAN_ENABLE;
958 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
959 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
960 &enable_cp);
961 }
962 }
963
964 /* Returns true if an LE connection is in the scanning state */
965 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
966 {
967 struct hci_conn_hash *h = &hdev->conn_hash;
968 struct hci_conn *c;
969
970 rcu_read_lock();
971
972 list_for_each_entry_rcu(c, &h->list, list) {
973 if (c->type == LE_LINK && c->state == BT_CONNECT &&
974 test_bit(HCI_CONN_SCANNING, &c->flags)) {
975 rcu_read_unlock();
976 return true;
977 }
978 }
979
980 rcu_read_unlock();
981
982 return false;
983 }
984
985 /* Ensure hci_req_add_le_scan_disable() is called first to disable
986 * controller based address resolution, so that the resolving list
987 * can be reconfigured.
988 */
989 void hci_req_add_le_passive_scan(struct hci_request *req)
990 {
991 struct hci_dev *hdev = req->hdev;
992 u8 own_addr_type;
993 u8 filter_policy;
994 u16 window, interval;
995 /* Background scanning should run with address resolution */
996 bool addr_resolv = true;
997
998 if (hdev->scanning_paused) {
999 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1000 return;
1001 }
1002
1003 /* Set require_privacy to false since no SCAN_REQ is sent
1004 * during passive scanning. Not using a non-resolvable address
1005 * here is important so that peer devices using direct
1006 * advertising with our address will be correctly reported
1007 * by the controller.
1008 */
1009 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1010 &own_addr_type))
1011 return;
1012
1013 /* Adding or removing entries from the white list must
1014 * happen before enabling scanning. The controller does
1015 * not allow white list modification while scanning.
1016 */
1017 filter_policy = update_white_list(req);
1018
1019 /* When the controller is using random resolvable addresses and
1020 * thus has LE privacy enabled, controllers with Extended Scanner
1021 * Filter Policies support can enable support
1022 * for handling directed advertising.
1023 *
1024 * So instead of using filter policies 0x00 (no whitelist)
1025 * and 0x01 (whitelist enabled) use the new filter policies
1026 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1027 */
1028 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1029 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1030 filter_policy |= 0x02;
1031
1032 if (hdev->suspended) {
1033 window = hdev->le_scan_window_suspend;
1034 interval = hdev->le_scan_int_suspend;
1035 } else if (hci_is_le_conn_scanning(hdev)) {
1036 window = hdev->le_scan_window_connect;
1037 interval = hdev->le_scan_int_connect;
1038 } else if (hci_is_adv_monitoring(hdev)) {
1039 window = hdev->le_scan_window_adv_monitor;
1040 interval = hdev->le_scan_int_adv_monitor;
1041 } else {
1042 window = hdev->le_scan_window;
1043 interval = hdev->le_scan_interval;
1044 }
1045
1046 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1047 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1048 own_addr_type, filter_policy, addr_resolv);
1049 }
1050
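/* Return the scan response length in use for @instance. Instance 0x00 and
 * instances with the appearance or local-name flag are reported as non-empty.
 */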
1051 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1052 {
1053 struct adv_info *adv_instance;
1054
1055 /* Instance 0x00 always sets the local name */
1056 if (instance == 0x00)
1057 return 1;
1058
1059 adv_instance = hci_find_adv_instance(hdev, instance);
1060 if (!adv_instance)
1061 return 0;
1062
1063 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1064 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1065 return 1;
1066
1067 return adv_instance->scan_rsp_len;
1068 }
1069
1070 static void hci_req_clear_event_filter(struct hci_request *req)
1071 {
1072 struct hci_cp_set_event_filter f;
1073
1074 memset(&f, 0, sizeof(f));
1075 f.flt_type = HCI_FLT_CLEAR_ALL;
1076 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1077
1078 /* Update page scan state (since we may have modified it when setting
1079 * the event filter).
1080 */
1081 __hci_req_update_scan(req);
1082 }
1083
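/* Program event filters so that only whitelisted devices marked as wakeable
 * can set up connections while suspended, and enable page scan if at least
 * one such device exists.
 */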
1084 static void hci_req_set_event_filter(struct hci_request *req)
1085 {
1086 struct bdaddr_list_with_flags *b;
1087 struct hci_cp_set_event_filter f;
1088 struct hci_dev *hdev = req->hdev;
1089 u8 scan = SCAN_DISABLED;
1090
1091 /* Always clear event filter when starting */
1092 hci_req_clear_event_filter(req);
1093
1094 list_for_each_entry(b, &hdev->whitelist, list) {
1095 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1096 b->current_flags))
1097 continue;
1098
1099 memset(&f, 0, sizeof(f));
1100 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1101 f.flt_type = HCI_FLT_CONN_SETUP;
1102 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1103 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1104
1105 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1106 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1107 scan = SCAN_PAGE;
1108 }
1109
1110 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1111 }
1112
1113 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1114 {
1115 /* Before changing params disable scan if enabled */
1116 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1117 hci_req_add_le_scan_disable(req, false);
1118
1119 /* Configure params and enable scanning */
1120 hci_req_add_le_passive_scan(req);
1121
1122 /* Block suspend notifier on response */
1123 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1124 }
1125
1126 static void cancel_adv_timeout(struct hci_dev *hdev)
1127 {
1128 if (hdev->adv_instance_timeout) {
1129 hdev->adv_instance_timeout = 0;
1130 cancel_delayed_work(&hdev->adv_instance_expire);
1131 }
1132 }
1133
1134 /* This function requires the caller holds hdev->lock */
1135 static void hci_suspend_adv_instances(struct hci_request *req)
1136 {
1137 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1138
1139 /* Call to disable any advertisements active on the controller.
1140 * This will succeed even if no advertisements are configured.
1141 */
1142 __hci_req_disable_advertising(req);
1143
1144 /* If we are using software rotation, pause the loop */
1145 if (!ext_adv_capable(req->hdev))
1146 cancel_adv_timeout(req->hdev);
1147 }
1148
1149 /* This function requires the caller holds hdev->lock */
1150 static void hci_resume_adv_instances(struct hci_request *req)
1151 {
1152 struct adv_info *adv;
1153
1154 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1155
1156 if (ext_adv_capable(req->hdev)) {
1157 /* Call for each tracked instance to be re-enabled */
1158 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1159 __hci_req_enable_ext_advertising(req,
1160 adv->instance);
1161 }
1162
1163 } else {
1164 /* Schedule the most recent instance to be restarted and begin
1165 * the software rotation loop
1166 */
1167 __hci_req_schedule_adv_instance(req,
1168 req->hdev->cur_adv_instance,
1169 true);
1170 }
1171 }
1172
1173 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1174 {
1175 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1176 status);
1177 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1178 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1179 wake_up(&hdev->suspend_wait_q);
1180 }
1181 }
1182
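/* Walk the controller through the suspend state machine: disconnect links and
 * disable scanning for BT_SUSPEND_DISCONNECT, program wake-up event filters
 * and a lower duty cycle scan for BT_SUSPEND_CONFIGURE_WAKE, and restore
 * normal operation otherwise.
 */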
1183 /* Call with hci_dev_lock */
1184 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1185 {
1186 int old_state;
1187 struct hci_conn *conn;
1188 struct hci_request req;
1189 u8 page_scan;
1190 int disconnect_counter;
1191
1192 if (next == hdev->suspend_state) {
1193 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1194 goto done;
1195 }
1196
1197 hdev->suspend_state = next;
1198 hci_req_init(&req, hdev);
1199
1200 if (next == BT_SUSPEND_DISCONNECT) {
1201 /* Mark device as suspended */
1202 hdev->suspended = true;
1203
1204 /* Pause discovery if not already stopped */
1205 old_state = hdev->discovery.state;
1206 if (old_state != DISCOVERY_STOPPED) {
1207 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1208 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1209 queue_work(hdev->req_workqueue, &hdev->discov_update);
1210 }
1211
1212 hdev->discovery_paused = true;
1213 hdev->discovery_old_state = old_state;
1214
1215 /* Stop directed advertising */
1216 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1217 if (old_state) {
1218 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1219 cancel_delayed_work(&hdev->discov_off);
1220 queue_delayed_work(hdev->req_workqueue,
1221 &hdev->discov_off, 0);
1222 }
1223
1224 /* Pause other advertisements */
1225 if (hdev->adv_instance_cnt)
1226 hci_suspend_adv_instances(&req);
1227
1228 hdev->advertising_paused = true;
1229 hdev->advertising_old_state = old_state;
1230 /* Disable page scan */
1231 page_scan = SCAN_DISABLED;
1232 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1233
1234 /* Disable LE passive scan if enabled */
1235 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1236 hci_req_add_le_scan_disable(&req, false);
1237
1238 /* Mark task needing completion */
1239 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1240
1241 /* Prevent disconnects from causing scanning to be re-enabled */
1242 hdev->scanning_paused = true;
1243
1244 /* Run commands before disconnecting */
1245 hci_req_run(&req, suspend_req_complete);
1246
1247 disconnect_counter = 0;
1248 /* Soft disconnect everything (power off) */
1249 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1250 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1251 disconnect_counter++;
1252 }
1253
1254 if (disconnect_counter > 0) {
1255 bt_dev_dbg(hdev,
1256 "Had %d disconnects. Will wait on them",
1257 disconnect_counter);
1258 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1259 }
1260 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1261 /* Unpause to take care of updating scanning params */
1262 hdev->scanning_paused = false;
1263 /* Enable event filter for paired devices */
1264 hci_req_set_event_filter(&req);
1265 /* Enable passive scan at lower duty cycle */
1266 hci_req_config_le_suspend_scan(&req);
1267 /* Pause scan changes again. */
1268 hdev->scanning_paused = true;
1269 hci_req_run(&req, suspend_req_complete);
1270 } else {
1271 hdev->suspended = false;
1272 hdev->scanning_paused = false;
1273
1274 hci_req_clear_event_filter(&req);
1275 /* Reset passive/background scanning to normal */
1276 hci_req_config_le_suspend_scan(&req);
1277
1278 /* Unpause directed advertising */
1279 hdev->advertising_paused = false;
1280 if (hdev->advertising_old_state) {
1281 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1282 hdev->suspend_tasks);
1283 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1284 queue_work(hdev->req_workqueue,
1285 &hdev->discoverable_update);
1286 hdev->advertising_old_state = 0;
1287 }
1288
1289 /* Resume other advertisements */
1290 if (hdev->adv_instance_cnt)
1291 hci_resume_adv_instances(&req);
1292
1293 /* Unpause discovery */
1294 hdev->discovery_paused = false;
1295 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1296 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1297 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1298 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1299 queue_work(hdev->req_workqueue, &hdev->discov_update);
1300 }
1301
1302 hci_req_run(&req, suspend_req_complete);
1303 }
1304
1305 hdev->suspend_state = next;
1306
1307 done:
1308 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1309 wake_up(&hdev->suspend_wait_q);
1310 }
1311
1312 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1313 {
1314 u8 instance = hdev->cur_adv_instance;
1315 struct adv_info *adv_instance;
1316
1317 /* Instance 0x00 always sets the local name */
1318 if (instance == 0x00)
1319 return 1;
1320
1321 adv_instance = hci_find_adv_instance(hdev, instance);
1322 if (!adv_instance)
1323 return 0;
1324
1325 /* TODO: Take into account the "appearance" and "local-name" flags here.
1326 * These are currently being ignored as they are not supported.
1327 */
1328 return adv_instance->scan_rsp_len;
1329 }
1330
1331 void __hci_req_disable_advertising(struct hci_request *req)
1332 {
1333 if (ext_adv_capable(req->hdev)) {
1334 __hci_req_disable_ext_adv_instance(req, 0x00);
1335
1336 } else {
1337 u8 enable = 0x00;
1338
1339 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1340 }
1341 }
1342
1343 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1344 {
1345 u32 flags;
1346 struct adv_info *adv_instance;
1347
1348 if (instance == 0x00) {
1349 /* Instance 0 always manages the "Tx Power" and "Flags"
1350 * fields
1351 */
1352 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1353
1354 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1355 * corresponds to the "connectable" instance flag.
1356 */
1357 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1358 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1359
1360 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1361 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1362 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1363 flags |= MGMT_ADV_FLAG_DISCOV;
1364
1365 return flags;
1366 }
1367
1368 adv_instance = hci_find_adv_instance(hdev, instance);
1369
1370 /* Return 0 when we got an invalid instance identifier. */
1371 if (!adv_instance)
1372 return 0;
1373
1374 return adv_instance->flags;
1375 }
1376
1377 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1378 {
1379 /* If privacy is not enabled don't use RPA */
1380 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1381 return false;
1382
1383 /* If basic privacy mode is enabled use RPA */
1384 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1385 return true;
1386
1387 /* If limited privacy mode is enabled don't use RPA if we're
1388 * both discoverable and bondable.
1389 */
1390 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1391 hci_dev_test_flag(hdev, HCI_BONDABLE))
1392 return false;
1393
1394 /* We're neither bondable nor discoverable in the limited
1395 * privacy mode, therefore use RPA.
1396 */
1397 return true;
1398 }
1399
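/* Consult the supported LE states to decide whether advertising of the
 * requested connectable type is allowed while LE connections exist.
 */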
1400 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1401 {
1402 /* If there is no connection we are OK to advertise. */
1403 if (hci_conn_num(hdev, LE_LINK) == 0)
1404 return true;
1405
1406 /* Check le_states if there is any connection in slave role. */
1407 if (hdev->conn_hash.le_num_slave > 0) {
1408 /* Slave connection state and non connectable mode bit 20. */
1409 if (!connectable && !(hdev->le_states[2] & 0x10))
1410 return false;
1411
1412 /* Slave connection state and connectable mode bit 38
1413 * and scannable bit 21.
1414 */
1415 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1416 !(hdev->le_states[2] & 0x20)))
1417 return false;
1418 }
1419
1420 /* Check le_states if there is any connection in master role. */
1421 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1422 /* Master connection state and non connectable mode bit 18. */
1423 if (!connectable && !(hdev->le_states[2] & 0x02))
1424 return false;
1425
1426 /* Master connection state and connectable mode bit 35 and
1427 * scannable 19.
1428 */
1429 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1430 !(hdev->le_states[2] & 0x08)))
1431 return false;
1432 }
1433
1434 return true;
1435 }
1436
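/* Queue the commands to (re)enable legacy advertising, deriving the
 * advertising type, own address type and intervals from the current
 * instance flags and device settings.
 */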
1437 void __hci_req_enable_advertising(struct hci_request *req)
1438 {
1439 struct hci_dev *hdev = req->hdev;
1440 struct hci_cp_le_set_adv_param cp;
1441 u8 own_addr_type, enable = 0x01;
1442 bool connectable;
1443 u16 adv_min_interval, adv_max_interval;
1444 u32 flags;
1445
1446 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1447
1448 /* If the "connectable" instance flag was not set, then choose between
1449 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1450 */
1451 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1452 mgmt_get_connectable(hdev);
1453
1454 if (!is_advertising_allowed(hdev, connectable))
1455 return;
1456
1457 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1458 __hci_req_disable_advertising(req);
1459
1460 /* Clear the HCI_LE_ADV bit temporarily so that the
1461 * hci_update_random_address knows that it's safe to go ahead
1462 * and write a new random address. The flag will be set back on
1463 * as soon as the SET_ADV_ENABLE HCI command completes.
1464 */
1465 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1466
1467 /* Set require_privacy to true only when non-connectable
1468 * advertising is used. In that case it is fine to use a
1469 * non-resolvable private address.
1470 */
1471 if (hci_update_random_address(req, !connectable,
1472 adv_use_rpa(hdev, flags),
1473 &own_addr_type) < 0)
1474 return;
1475
1476 memset(&cp, 0, sizeof(cp));
1477
1478 if (connectable) {
1479 cp.type = LE_ADV_IND;
1480
1481 adv_min_interval = hdev->le_adv_min_interval;
1482 adv_max_interval = hdev->le_adv_max_interval;
1483 } else {
1484 if (get_cur_adv_instance_scan_rsp_len(hdev))
1485 cp.type = LE_ADV_SCAN_IND;
1486 else
1487 cp.type = LE_ADV_NONCONN_IND;
1488
1489 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1490 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1491 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1492 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1493 } else {
1494 adv_min_interval = hdev->le_adv_min_interval;
1495 adv_max_interval = hdev->le_adv_max_interval;
1496 }
1497 }
1498
1499 cp.min_interval = cpu_to_le16(adv_min_interval);
1500 cp.max_interval = cpu_to_le16(adv_max_interval);
1501 cp.own_address_type = own_addr_type;
1502 cp.channel_map = hdev->le_adv_channel_map;
1503
1504 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1505
1506 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1507 }
1508
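/* Append the device name to an advertising data buffer, preferring the
 * complete name, then the short name, then a truncated complete name,
 * depending on the space left.
 */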
1509 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1510 {
1511 size_t short_len;
1512 size_t complete_len;
1513
1514 /* no space left for name (+ NULL + type + len) */
1515 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1516 return ad_len;
1517
1518 /* use complete name if present and fits */
1519 complete_len = strlen(hdev->dev_name);
1520 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1521 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1522 hdev->dev_name, complete_len + 1);
1523
1524 /* use short name if present */
1525 short_len = strlen(hdev->short_name);
1526 if (short_len)
1527 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1528 hdev->short_name, short_len + 1);
1529
1530 /* use shortened full name if present, we already know that name
1531 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1532 */
1533 if (complete_len) {
1534 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1535
1536 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1537 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1538
1539 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1540 sizeof(name));
1541 }
1542
1543 return ad_len;
1544 }
1545
1546 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1547 {
1548 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1549 }
1550
1551 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1552 {
1553 u8 scan_rsp_len = 0;
1554
1555 if (hdev->appearance) {
1556 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1557 }
1558
1559 return append_local_name(hdev, ptr, scan_rsp_len);
1560 }
1561
1562 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1563 u8 *ptr)
1564 {
1565 struct adv_info *adv_instance;
1566 u32 instance_flags;
1567 u8 scan_rsp_len = 0;
1568
1569 adv_instance = hci_find_adv_instance(hdev, instance);
1570 if (!adv_instance)
1571 return 0;
1572
1573 instance_flags = adv_instance->flags;
1574
1575 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1576 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1577 }
1578
1579 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1580 adv_instance->scan_rsp_len);
1581
1582 scan_rsp_len += adv_instance->scan_rsp_len;
1583
1584 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1585 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1586
1587 return scan_rsp_len;
1588 }
1589
1590 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1591 {
1592 struct hci_dev *hdev = req->hdev;
1593 u8 len;
1594
1595 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1596 return;
1597
1598 if (ext_adv_capable(hdev)) {
1599 struct {
1600 struct hci_cp_le_set_ext_scan_rsp_data cp;
1601 u8 data[HCI_MAX_EXT_AD_LENGTH];
1602 } pdu;
1603
1604 memset(&pdu, 0, sizeof(pdu));
1605
1606 if (instance)
1607 len = create_instance_scan_rsp_data(hdev, instance,
1608 pdu.data);
1609 else
1610 len = create_default_scan_rsp_data(hdev, pdu.data);
1611
1612 if (hdev->scan_rsp_data_len == len &&
1613 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1614 return;
1615
1616 memcpy(hdev->scan_rsp_data, pdu.data, len);
1617 hdev->scan_rsp_data_len = len;
1618
1619 pdu.cp.handle = instance;
1620 pdu.cp.length = len;
1621 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1622 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1623
1624 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1625 sizeof(pdu.cp) + len, &pdu.cp);
1626 } else {
1627 struct hci_cp_le_set_scan_rsp_data cp;
1628
1629 memset(&cp, 0, sizeof(cp));
1630
1631 if (instance)
1632 len = create_instance_scan_rsp_data(hdev, instance,
1633 cp.data);
1634 else
1635 len = create_default_scan_rsp_data(hdev, cp.data);
1636
1637 if (hdev->scan_rsp_data_len == len &&
1638 !memcmp(cp.data, hdev->scan_rsp_data, len))
1639 return;
1640
1641 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1642 hdev->scan_rsp_data_len = len;
1643
1644 cp.length = len;
1645
1646 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1647 }
1648 }
1649
1650 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1651 {
1652 struct adv_info *adv_instance = NULL;
1653 u8 ad_len = 0, flags = 0;
1654 u32 instance_flags;
1655
1656 /* Return 0 when the current instance identifier is invalid. */
1657 if (instance) {
1658 adv_instance = hci_find_adv_instance(hdev, instance);
1659 if (!adv_instance)
1660 return 0;
1661 }
1662
1663 instance_flags = get_adv_instance_flags(hdev, instance);
1664
1665 /* If the instance already has the flags set, skip adding them once
1666 * again.
1667 */
1668 if (adv_instance && eir_get_data(adv_instance->adv_data,
1669 adv_instance->adv_data_len, EIR_FLAGS,
1670 NULL))
1671 goto skip_flags;
1672
1673 /* The Add Advertising command allows userspace to set both the general
1674 * and limited discoverable flags.
1675 */
1676 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1677 flags |= LE_AD_GENERAL;
1678
1679 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1680 flags |= LE_AD_LIMITED;
1681
1682 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1683 flags |= LE_AD_NO_BREDR;
1684
1685 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1686 /* If a discovery flag wasn't provided, simply use the global
1687 * settings.
1688 */
1689 if (!flags)
1690 flags |= mgmt_get_adv_discov_flags(hdev);
1691
1692 /* If flags would still be empty, then there is no need to
1693 * include the "Flags" AD field.
1694 */
1695 if (flags) {
1696 ptr[0] = 0x02;
1697 ptr[1] = EIR_FLAGS;
1698 ptr[2] = flags;
1699
1700 ad_len += 3;
1701 ptr += 3;
1702 }
1703 }
1704
1705 skip_flags:
1706 if (adv_instance) {
1707 memcpy(ptr, adv_instance->adv_data,
1708 adv_instance->adv_data_len);
1709 ad_len += adv_instance->adv_data_len;
1710 ptr += adv_instance->adv_data_len;
1711 }
1712
1713 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1714 s8 adv_tx_power;
1715
1716 if (ext_adv_capable(hdev)) {
1717 if (adv_instance)
1718 adv_tx_power = adv_instance->tx_power;
1719 else
1720 adv_tx_power = hdev->adv_tx_power;
1721 } else {
1722 adv_tx_power = hdev->adv_tx_power;
1723 }
1724
1725 /* Provide Tx Power only if we can provide a valid value for it */
1726 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1727 ptr[0] = 0x02;
1728 ptr[1] = EIR_TX_POWER;
1729 ptr[2] = (u8)adv_tx_power;
1730
1731 ad_len += 3;
1732 ptr += 3;
1733 }
1734 }
1735
1736 return ad_len;
1737 }
1738
1739 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1740 {
1741 struct hci_dev *hdev = req->hdev;
1742 u8 len;
1743
1744 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1745 return;
1746
1747 if (ext_adv_capable(hdev)) {
1748 struct {
1749 struct hci_cp_le_set_ext_adv_data cp;
1750 u8 data[HCI_MAX_EXT_AD_LENGTH];
1751 } pdu;
1752
1753 memset(&pdu, 0, sizeof(pdu));
1754
1755 len = create_instance_adv_data(hdev, instance, pdu.data);
1756
1757 /* There's nothing to do if the data hasn't changed */
1758 if (hdev->adv_data_len == len &&
1759 memcmp(pdu.data, hdev->adv_data, len) == 0)
1760 return;
1761
1762 memcpy(hdev->adv_data, pdu.data, len);
1763 hdev->adv_data_len = len;
1764
1765 pdu.cp.length = len;
1766 pdu.cp.handle = instance;
1767 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1768 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1769
1770 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1771 sizeof(pdu.cp) + len, &pdu.cp);
1772 } else {
1773 struct hci_cp_le_set_adv_data cp;
1774
1775 memset(&cp, 0, sizeof(cp));
1776
1777 len = create_instance_adv_data(hdev, instance, cp.data);
1778
1779 /* There's nothing to do if the data hasn't changed */
1780 if (hdev->adv_data_len == len &&
1781 memcmp(cp.data, hdev->adv_data, len) == 0)
1782 return;
1783
1784 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1785 hdev->adv_data_len = len;
1786
1787 cp.length = len;
1788
1789 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1790 }
1791 }
1792
1793 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1794 {
1795 struct hci_request req;
1796
1797 hci_req_init(&req, hdev);
1798 __hci_req_update_adv_data(&req, instance);
1799
1800 return hci_req_run(&req, NULL);
1801 }
1802
1803 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1804 u16 opcode)
1805 {
1806 BT_DBG("%s status %u", hdev->name, status);
1807 }
1808
1809 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1810 {
1811 struct hci_request req;
1812 __u8 enable = 0x00;
1813
1814 if (!use_ll_privacy(hdev) &&
1815 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1816 return;
1817
1818 hci_req_init(&req, hdev);
1819
1820 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1821
1822 hci_req_run(&req, enable_addr_resolution_complete);
1823 }
1824
1825 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1826 {
1827 BT_DBG("%s status %u", hdev->name, status);
1828 }
1829
1830 void hci_req_reenable_advertising(struct hci_dev *hdev)
1831 {
1832 struct hci_request req;
1833
1834 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1835 list_empty(&hdev->adv_instances))
1836 return;
1837
1838 hci_req_init(&req, hdev);
1839
1840 if (hdev->cur_adv_instance) {
1841 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1842 true);
1843 } else {
1844 if (ext_adv_capable(hdev)) {
1845 __hci_req_start_ext_adv(&req, 0x00);
1846 } else {
1847 __hci_req_update_adv_data(&req, 0x00);
1848 __hci_req_update_scan_rsp_data(&req, 0x00);
1849 __hci_req_enable_advertising(&req);
1850 }
1851 }
1852
1853 hci_req_run(&req, adv_enable_complete);
1854 }
1855
1856 static void adv_timeout_expire(struct work_struct *work)
1857 {
1858 struct hci_dev *hdev = container_of(work, struct hci_dev,
1859 adv_instance_expire.work);
1860
1861 struct hci_request req;
1862 u8 instance;
1863
1864 BT_DBG("%s", hdev->name);
1865
1866 hci_dev_lock(hdev);
1867
1868 hdev->adv_instance_timeout = 0;
1869
1870 instance = hdev->cur_adv_instance;
1871 if (instance == 0x00)
1872 goto unlock;
1873
1874 hci_req_init(&req, hdev);
1875
1876 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1877
1878 if (list_empty(&hdev->adv_instances))
1879 __hci_req_disable_advertising(&req);
1880
1881 hci_req_run(&req, NULL);
1882
1883 unlock:
1884 hci_dev_unlock(hdev);
1885 }
1886
1887 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1888 bool use_rpa, struct adv_info *adv_instance,
1889 u8 *own_addr_type, bdaddr_t *rand_addr)
1890 {
1891 int err;
1892
1893 bacpy(rand_addr, BDADDR_ANY);
1894
1895 /* If privacy is enabled, use a resolvable private address. If
1896 * the current RPA has expired, generate a new one.
1897 */
1898 if (use_rpa) {
1899 int to;
1900
1901 /* If the Controller supports LL Privacy, use own address
1902 * type 0x03
1903 */
1904 if (use_ll_privacy(hdev))
1905 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1906 else
1907 *own_addr_type = ADDR_LE_DEV_RANDOM;
1908
1909 if (adv_instance) {
1910 if (!adv_instance->rpa_expired &&
1911 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1912 return 0;
1913
1914 adv_instance->rpa_expired = false;
1915 } else {
1916 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1917 !bacmp(&hdev->random_addr, &hdev->rpa))
1918 return 0;
1919 }
1920
1921 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1922 if (err < 0) {
1923 bt_dev_err(hdev, "failed to generate new RPA");
1924 return err;
1925 }
1926
1927 bacpy(rand_addr, &hdev->rpa);
1928
1929 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1930 if (adv_instance)
1931 queue_delayed_work(hdev->workqueue,
1932 &adv_instance->rpa_expired_cb, to);
1933 else
1934 queue_delayed_work(hdev->workqueue,
1935 &hdev->rpa_expired, to);
1936
1937 return 0;
1938 }
1939
1940 /* In case privacy is required without a resolvable private address,
1941 * use a non-resolvable private address. This is useful for
1942 * non-connectable advertising.
1943 */
1944 if (require_privacy) {
1945 bdaddr_t nrpa;
1946
1947 while (true) {
1948 /* The non-resolvable private address is generated
1949 * from six random bytes with the two most significant
1950 * bits cleared.
1951 */
1952 get_random_bytes(&nrpa, 6);
1953 nrpa.b[5] &= 0x3f;
1954
1955 /* The non-resolvable private address shall not be
1956 * equal to the public address.
1957 */
1958 if (bacmp(&hdev->bdaddr, &nrpa))
1959 break;
1960 }
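		/* Worked example (illustrative comment, not in the original
		 * source): bdaddr_t is stored little-endian, so nrpa.b[5] is
		 * the most significant byte; masking it with 0x3f clears the
		 * two top bits, which is what marks the address as
		 * non-resolvable. E.g. a random b[5] of 0xc7 becomes 0x07.
		 */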
1961
1962 *own_addr_type = ADDR_LE_DEV_RANDOM;
1963 bacpy(rand_addr, &nrpa);
1964
1965 return 0;
1966 }
1967
1968 /* No privacy so use a public address. */
1969 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1970
1971 return 0;
1972 }
1973
1974 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1975 {
1976 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1977 }
1978
1979 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1980 {
1981 struct hci_cp_le_set_ext_adv_params cp;
1982 struct hci_dev *hdev = req->hdev;
1983 bool connectable;
1984 u32 flags;
1985 bdaddr_t random_addr;
1986 u8 own_addr_type;
1987 int err;
1988 struct adv_info *adv_instance;
1989 bool secondary_adv;
1990
1991 if (instance > 0) {
1992 adv_instance = hci_find_adv_instance(hdev, instance);
1993 if (!adv_instance)
1994 return -EINVAL;
1995 } else {
1996 adv_instance = NULL;
1997 }
1998
1999 flags = get_adv_instance_flags(hdev, instance);
2000
2001 /* If the "connectable" instance flag was not set, then choose between
2002 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2003 */
2004 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2005 mgmt_get_connectable(hdev);
2006
2007 if (!is_advertising_allowed(hdev, connectable))
2008 return -EPERM;
2009
2010 /* Set require_privacy to true only when non-connectable
2011 * advertising is used. In that case it is fine to use a
2012 * non-resolvable private address.
2013 */
2014 err = hci_get_random_address(hdev, !connectable,
2015 adv_use_rpa(hdev, flags), adv_instance,
2016 &own_addr_type, &random_addr);
2017 if (err < 0)
2018 return err;
2019
2020 memset(&cp, 0, sizeof(cp));
2021
2022 /* In the ext adv Set Parameters command the interval is 3 octets */
2023 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2024 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
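	/* Illustrative note (not part of the original source): the interval
	 * fields are expressed in units of 0.625 ms and written as 3-octet
	 * little-endian values by hci_cpu_to_le24(). For example, the common
	 * default of 0x0800 corresponds to 0x800 * 0.625 ms = 1.28 s and is
	 * encoded as the bytes 00 08 00.
	 */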
2025
2026 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2027
2028 if (connectable) {
2029 if (secondary_adv)
2030 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2031 else
2032 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2033 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2034 if (secondary_adv)
2035 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2036 else
2037 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2038 } else {
2039 if (secondary_adv)
2040 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2041 else
2042 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2043 }
2044
2045 cp.own_addr_type = own_addr_type;
2046 cp.channel_map = hdev->le_adv_channel_map;
2047 cp.tx_power = 127;
2048 cp.handle = instance;
2049
2050 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2051 cp.primary_phy = HCI_ADV_PHY_1M;
2052 cp.secondary_phy = HCI_ADV_PHY_2M;
2053 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2054 cp.primary_phy = HCI_ADV_PHY_CODED;
2055 cp.secondary_phy = HCI_ADV_PHY_CODED;
2056 } else {
2057 /* In all other cases use 1M */
2058 cp.primary_phy = HCI_ADV_PHY_1M;
2059 cp.secondary_phy = HCI_ADV_PHY_1M;
2060 }
2061
2062 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2063
2064 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2065 bacmp(&random_addr, BDADDR_ANY)) {
2066 struct hci_cp_le_set_adv_set_rand_addr cp;
2067
2068 /* Check if the random address needs to be updated */
2069 if (adv_instance) {
2070 if (!bacmp(&random_addr, &adv_instance->random_addr))
2071 return 0;
2072 } else {
2073 if (!bacmp(&random_addr, &hdev->random_addr))
2074 return 0;
2075 }
2076
2077 memset(&cp, 0, sizeof(cp));
2078
2079 cp.handle = instance;
2080 bacpy(&cp.bdaddr, &random_addr);
2081
2082 hci_req_add(req,
2083 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2084 sizeof(cp), &cp);
2085 }
2086
2087 return 0;
2088 }
2089
2090 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2091 {
2092 struct hci_dev *hdev = req->hdev;
2093 struct hci_cp_le_set_ext_adv_enable *cp;
2094 struct hci_cp_ext_adv_set *adv_set;
2095 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2096 struct adv_info *adv_instance;
2097
2098 if (instance > 0) {
2099 adv_instance = hci_find_adv_instance(hdev, instance);
2100 if (!adv_instance)
2101 return -EINVAL;
2102 } else {
2103 adv_instance = NULL;
2104 }
2105
2106 cp = (void *) data;
2107 adv_set = (void *) cp->data;
2108
2109 memset(cp, 0, sizeof(*cp));
2110
2111 cp->enable = 0x01;
2112 cp->num_of_sets = 0x01;
2113
2114 memset(adv_set, 0, sizeof(*adv_set));
2115
2116 adv_set->handle = instance;
2117
2118 /* Set duration per instance since controller is responsible for
2119 * scheduling it.
2120 */
2121 if (adv_instance && adv_instance->timeout) {
2122 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2123
2124 /* Time = N * 10 ms */
2125 adv_set->duration = cpu_to_le16(duration / 10);
2126 }
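	/* Worked example (illustrative, not in the original source): an
	 * instance timeout of 5 seconds gives duration = 5 * 1000 = 5000 ms,
	 * so adv_set->duration is set to 500, i.e. N = 500 where the
	 * controller interprets the field as N * 10 ms = 5 seconds.
	 */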
2127
2128 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2129 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2130 data);
2131
2132 return 0;
2133 }
2134
2135 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2136 {
2137 struct hci_dev *hdev = req->hdev;
2138 struct hci_cp_le_set_ext_adv_enable *cp;
2139 struct hci_cp_ext_adv_set *adv_set;
2140 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2141 u8 req_size;
2142
2143 /* If request specifies an instance that doesn't exist, fail */
2144 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2145 return -EINVAL;
2146
2147 memset(data, 0, sizeof(data));
2148
2149 cp = (void *)data;
2150 adv_set = (void *)cp->data;
2151
2152 /* Instance 0x00 indicates all advertising instances will be disabled */
2153 cp->num_of_sets = !!instance;
2154 cp->enable = 0x00;
2155
2156 adv_set->handle = instance;
2157
2158 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
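	/* Illustrative note (not part of the original source): when instance
	 * is 0x00, num_of_sets is 0 and req_size shrinks to sizeof(*cp), so
	 * the adv_set entry written above is simply not transmitted and the
	 * controller disables all advertising sets.
	 */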
2159 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2160
2161 return 0;
2162 }
2163
2164 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2165 {
2166 struct hci_dev *hdev = req->hdev;
2167
2168 /* If request specifies an instance that doesn't exist, fail */
2169 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2170 return -EINVAL;
2171
2172 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2173
2174 return 0;
2175 }
2176
2177 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2178 {
2179 struct hci_dev *hdev = req->hdev;
2180 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2181 int err;
2182
2183 /* If the instance isn't pending, the controller knows about it, and
2184 * it's safe to disable it first.
2185 */
2186 if (adv_instance && !adv_instance->pending)
2187 __hci_req_disable_ext_adv_instance(req, instance);
2188
2189 err = __hci_req_setup_ext_adv_instance(req, instance);
2190 if (err < 0)
2191 return err;
2192
2193 __hci_req_update_scan_rsp_data(req, instance);
2194 __hci_req_enable_ext_advertising(req, instance);
2195
2196 return 0;
2197 }
2198
2199 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2200 bool force)
2201 {
2202 struct hci_dev *hdev = req->hdev;
2203 struct adv_info *adv_instance = NULL;
2204 u16 timeout;
2205
2206 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2207 list_empty(&hdev->adv_instances))
2208 return -EPERM;
2209
2210 if (hdev->adv_instance_timeout)
2211 return -EBUSY;
2212
2213 adv_instance = hci_find_adv_instance(hdev, instance);
2214 if (!adv_instance)
2215 return -ENOENT;
2216
2217 /* A zero timeout means unlimited advertising. As long as there is
2218 * only one instance, duration should be ignored. We still set a timeout
2219 * in case further instances are being added later on.
2220 *
2221 * If the remaining lifetime of the instance is more than the duration
2222 * then the timeout corresponds to the duration, otherwise it will be
2223 * reduced to the remaining instance lifetime.
2224 */
2225 if (adv_instance->timeout == 0 ||
2226 adv_instance->duration <= adv_instance->remaining_time)
2227 timeout = adv_instance->duration;
2228 else
2229 timeout = adv_instance->remaining_time;
2230
2231 /* The remaining time is being reduced unless the instance is being
2232 * advertised without time limit.
2233 */
2234 if (adv_instance->timeout)
2235 adv_instance->remaining_time =
2236 adv_instance->remaining_time - timeout;
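	/* Worked example (illustrative, not in the original source): an
	 * instance with timeout = 60 s, duration = 10 s and remaining_time =
	 * 60 s is scheduled for 10 s and remaining_time drops to 50 s; once
	 * remaining_time falls below the duration, the shorter remaining time
	 * is used instead. A timeout of 0 means the remaining time is never
	 * reduced and the instance advertises indefinitely.
	 */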
2237
2238 /* Only use work for scheduling instances with legacy advertising */
2239 if (!ext_adv_capable(hdev)) {
2240 hdev->adv_instance_timeout = timeout;
2241 queue_delayed_work(hdev->req_workqueue,
2242 &hdev->adv_instance_expire,
2243 msecs_to_jiffies(timeout * 1000));
2244 }
2245
2246 /* If we're just re-scheduling the same instance again then do not
2247 * execute any HCI commands. This happens when a single instance is
2248 * being advertised.
2249 */
2250 if (!force && hdev->cur_adv_instance == instance &&
2251 hci_dev_test_flag(hdev, HCI_LE_ADV))
2252 return 0;
2253
2254 hdev->cur_adv_instance = instance;
2255 if (ext_adv_capable(hdev)) {
2256 __hci_req_start_ext_adv(req, instance);
2257 } else {
2258 __hci_req_update_adv_data(req, instance);
2259 __hci_req_update_scan_rsp_data(req, instance);
2260 __hci_req_enable_advertising(req);
2261 }
2262
2263 return 0;
2264 }
2265
2266 /* For a single instance:
2267 * - force == true: The instance will be removed even when its remaining
2268 * lifetime is not zero.
2269 * - force == false: the instance will be deactivated but kept stored unless
2270 * the remaining lifetime is zero.
2271 *
2272 * For instance == 0x00:
2273 * - force == true: All instances will be removed regardless of their timeout
2274 * setting.
2275 * - force == false: Only instances that have a timeout will be removed.
2276 */
2277 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2278 struct hci_request *req, u8 instance,
2279 bool force)
2280 {
2281 struct adv_info *adv_instance, *n, *next_instance = NULL;
2282 int err;
2283 u8 rem_inst;
2284
2285 /* Cancel any timeout concerning the removed instance(s). */
2286 if (!instance || hdev->cur_adv_instance == instance)
2287 cancel_adv_timeout(hdev);
2288
2289 /* Get the next instance to advertise BEFORE we remove
2290 * the current one. This can be the same instance again
2291 * if there is only one instance.
2292 */
2293 if (instance && hdev->cur_adv_instance == instance)
2294 next_instance = hci_get_next_instance(hdev, instance);
2295
2296 if (instance == 0x00) {
2297 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2298 list) {
2299 if (!(force || adv_instance->timeout))
2300 continue;
2301
2302 rem_inst = adv_instance->instance;
2303 err = hci_remove_adv_instance(hdev, rem_inst);
2304 if (!err)
2305 mgmt_advertising_removed(sk, hdev, rem_inst);
2306 }
2307 } else {
2308 adv_instance = hci_find_adv_instance(hdev, instance);
2309
2310 if (force || (adv_instance && adv_instance->timeout &&
2311 !adv_instance->remaining_time)) {
2312 /* Don't advertise a removed instance. */
2313 if (next_instance &&
2314 next_instance->instance == instance)
2315 next_instance = NULL;
2316
2317 err = hci_remove_adv_instance(hdev, instance);
2318 if (!err)
2319 mgmt_advertising_removed(sk, hdev, instance);
2320 }
2321 }
2322
2323 if (!req || !hdev_is_powered(hdev) ||
2324 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2325 return;
2326
2327 if (next_instance && !ext_adv_capable(hdev))
2328 __hci_req_schedule_adv_instance(req, next_instance->instance,
2329 false);
2330 }
2331
2332 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2333 {
2334 struct hci_dev *hdev = req->hdev;
2335
2336 /* If we're advertising or initiating an LE connection we can't
2337 * go ahead and change the random address at this time. This is
2338 * because the eventual initiator address used for the
2339 * subsequently created connection will be undefined (some
2340 * controllers use the new address and others the one we had
2341 * when the operation started).
2342 *
2343 * In this kind of scenario skip the update and let the random
2344 * address be updated at the next cycle.
2345 */
2346 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2347 hci_lookup_le_connect(hdev)) {
2348 BT_DBG("Deferring random address update");
2349 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2350 return;
2351 }
2352
2353 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2354 }
2355
2356 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2357 bool use_rpa, u8 *own_addr_type)
2358 {
2359 struct hci_dev *hdev = req->hdev;
2360 int err;
2361
2362 /* If privacy is enabled, use a resolvable private address. If
2363 * the current RPA has expired or something other than the
2364 * current RPA is in use, generate a new one.
2365 */
2366 if (use_rpa) {
2367 int to;
2368
2369 /* If the Controller supports LL Privacy, use own address
2370 * type 0x03
2371 */
2372 if (use_ll_privacy(hdev))
2373 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2374 else
2375 *own_addr_type = ADDR_LE_DEV_RANDOM;
2376
2377 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2378 !bacmp(&hdev->random_addr, &hdev->rpa))
2379 return 0;
2380
2381 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2382 if (err < 0) {
2383 bt_dev_err(hdev, "failed to generate new RPA");
2384 return err;
2385 }
2386
2387 set_random_addr(req, &hdev->rpa);
2388
2389 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2390 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2391
2392 return 0;
2393 }
2394
2395 /* In case privacy is required without a resolvable private address,
2396 * use a non-resolvable private address. This is useful for active
2397 * scanning and non-connectable advertising.
2398 */
2399 if (require_privacy) {
2400 bdaddr_t nrpa;
2401
2402 while (true) {
2403 /* The non-resolvable private address is generated
2404 * from six random bytes with the two most significant
2405 * bits cleared.
2406 */
2407 get_random_bytes(&nrpa, 6);
2408 nrpa.b[5] &= 0x3f;
2409
2410 /* The non-resolvable private address shall not be
2411 * equal to the public address.
2412 */
2413 if (bacmp(&hdev->bdaddr, &nrpa))
2414 break;
2415 }
2416
2417 *own_addr_type = ADDR_LE_DEV_RANDOM;
2418 set_random_addr(req, &nrpa);
2419 return 0;
2420 }
2421
2422 /* If forcing a static address is in use or there is no public
2423 * address, use the static address as the random address (but
2424 * skip the HCI command if the current random address is already
2425 * the static one).
2426 *
2427 * In case BR/EDR has been disabled on a dual-mode controller
2428 * and a static address has been configured, then use that
2429 * address instead of the public BR/EDR address.
2430 */
2431 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2432 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2433 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2434 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2435 *own_addr_type = ADDR_LE_DEV_RANDOM;
2436 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2437 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2438 &hdev->static_addr);
2439 return 0;
2440 }
2441
2442 /* Neither privacy nor static address is being used so use a
2443 * public address.
2444 */
2445 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2446
2447 return 0;
2448 }
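/* Summary (illustrative comment, not in the original source): the own
 * address selection above falls through in priority order:
 *   1. an RPA when use_rpa is set (random own address type, or 0x03 when
 *      the controller supports LL Privacy),
 *   2. a freshly generated NRPA when only require_privacy is set,
 *   3. the configured static random address when forced, when no public
 *      address exists, or when BR/EDR is disabled and a static address is
 *      configured,
 *   4. otherwise the public address.
 */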
2449
2450 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2451 {
2452 struct bdaddr_list *b;
2453
2454 list_for_each_entry(b, &hdev->whitelist, list) {
2455 struct hci_conn *conn;
2456
2457 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2458 if (!conn)
2459 return true;
2460
2461 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2462 return true;
2463 }
2464
2465 return false;
2466 }
2467
2468 void __hci_req_update_scan(struct hci_request *req)
2469 {
2470 struct hci_dev *hdev = req->hdev;
2471 u8 scan;
2472
2473 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2474 return;
2475
2476 if (!hdev_is_powered(hdev))
2477 return;
2478
2479 if (mgmt_powering_down(hdev))
2480 return;
2481
2482 if (hdev->scanning_paused)
2483 return;
2484
2485 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2486 disconnected_whitelist_entries(hdev))
2487 scan = SCAN_PAGE;
2488 else
2489 scan = SCAN_DISABLED;
2490
2491 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2492 scan |= SCAN_INQUIRY;
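	/* Illustrative note (not part of the original source): assuming the
	 * usual Write Scan Enable encoding where SCAN_INQUIRY is bit 0 and
	 * SCAN_PAGE is bit 1, a device that is both connectable and
	 * discoverable ends up with scan = 0x03, connectable-only with 0x02
	 * and neither with 0x00 (SCAN_DISABLED).
	 */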
2493
2494 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2495 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2496 return;
2497
2498 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2499 }
2500
2501 static int update_scan(struct hci_request *req, unsigned long opt)
2502 {
2503 hci_dev_lock(req->hdev);
2504 __hci_req_update_scan(req);
2505 hci_dev_unlock(req->hdev);
2506 return 0;
2507 }
2508
2509 static void scan_update_work(struct work_struct *work)
2510 {
2511 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2512
2513 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2514 }
2515
2516 static int connectable_update(struct hci_request *req, unsigned long opt)
2517 {
2518 struct hci_dev *hdev = req->hdev;
2519
2520 hci_dev_lock(hdev);
2521
2522 __hci_req_update_scan(req);
2523
2524 /* If BR/EDR is not enabled and we disable advertising as a
2525 * by-product of disabling connectable, we need to update the
2526 * advertising flags.
2527 */
2528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2529 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2530
2531 /* Update the advertising parameters if necessary */
2532 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2533 !list_empty(&hdev->adv_instances)) {
2534 if (ext_adv_capable(hdev))
2535 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2536 else
2537 __hci_req_enable_advertising(req);
2538 }
2539
2540 __hci_update_background_scan(req);
2541
2542 hci_dev_unlock(hdev);
2543
2544 return 0;
2545 }
2546
2547 static void connectable_update_work(struct work_struct *work)
2548 {
2549 struct hci_dev *hdev = container_of(work, struct hci_dev,
2550 connectable_update);
2551 u8 status;
2552
2553 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2554 mgmt_set_connectable_complete(hdev, status);
2555 }
2556
2557 static u8 get_service_classes(struct hci_dev *hdev)
2558 {
2559 struct bt_uuid *uuid;
2560 u8 val = 0;
2561
2562 list_for_each_entry(uuid, &hdev->uuids, list)
2563 val |= uuid->svc_hint;
2564
2565 return val;
2566 }
2567
2568 void __hci_req_update_class(struct hci_request *req)
2569 {
2570 struct hci_dev *hdev = req->hdev;
2571 u8 cod[3];
2572
2573 BT_DBG("%s", hdev->name);
2574
2575 if (!hdev_is_powered(hdev))
2576 return;
2577
2578 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2579 return;
2580
2581 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2582 return;
2583
2584 cod[0] = hdev->minor_class;
2585 cod[1] = hdev->major_class;
2586 cod[2] = get_service_classes(hdev);
2587
2588 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2589 cod[1] |= 0x20;
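	/* Illustrative note (not part of the original source): per the usual
	 * Class of Device layout, the 24-bit CoD is stored little-endian, so
	 * cod[0] holds the minor class byte, cod[1] the major class byte and
	 * cod[2] the service class bits. CoD bit 13 is the Limited
	 * Discoverable Mode bit, i.e. bit 5 of cod[1], hence the 0x20 above.
	 */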
2590
2591 if (memcmp(cod, hdev->dev_class, 3) == 0)
2592 return;
2593
2594 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2595 }
2596
2597 static void write_iac(struct hci_request *req)
2598 {
2599 struct hci_dev *hdev = req->hdev;
2600 struct hci_cp_write_current_iac_lap cp;
2601
2602 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2603 return;
2604
2605 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2606 /* Limited discoverable mode */
2607 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2608 cp.iac_lap[0] = 0x00; /* LIAC */
2609 cp.iac_lap[1] = 0x8b;
2610 cp.iac_lap[2] = 0x9e;
2611 cp.iac_lap[3] = 0x33; /* GIAC */
2612 cp.iac_lap[4] = 0x8b;
2613 cp.iac_lap[5] = 0x9e;
2614 } else {
2615 /* General discoverable mode */
2616 cp.num_iac = 1;
2617 cp.iac_lap[0] = 0x33; /* GIAC */
2618 cp.iac_lap[1] = 0x8b;
2619 cp.iac_lap[2] = 0x9e;
2620 }
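	/* Illustrative note (not part of the original source): the LAPs are
	 * written least significant byte first, so 0x9e8b33 (GIAC) becomes
	 * the byte sequence 0x33 0x8b 0x9e and 0x9e8b00 (LIAC) becomes
	 * 0x00 0x8b 0x9e, matching the assignments above.
	 */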
2621
2622 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2623 (cp.num_iac * 3) + 1, &cp);
2624 }
2625
2626 static int discoverable_update(struct hci_request *req, unsigned long opt)
2627 {
2628 struct hci_dev *hdev = req->hdev;
2629
2630 hci_dev_lock(hdev);
2631
2632 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2633 write_iac(req);
2634 __hci_req_update_scan(req);
2635 __hci_req_update_class(req);
2636 }
2637
2638 /* Advertising instances don't use the global discoverable setting, so
2639 * only update AD if advertising was enabled using Set Advertising.
2640 */
2641 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2642 __hci_req_update_adv_data(req, 0x00);
2643
2644 /* Discoverable mode affects the local advertising
2645 * address in limited privacy mode.
2646 */
2647 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2648 if (ext_adv_capable(hdev))
2649 __hci_req_start_ext_adv(req, 0x00);
2650 else
2651 __hci_req_enable_advertising(req);
2652 }
2653 }
2654
2655 hci_dev_unlock(hdev);
2656
2657 return 0;
2658 }
2659
2660 static void discoverable_update_work(struct work_struct *work)
2661 {
2662 struct hci_dev *hdev = container_of(work, struct hci_dev,
2663 discoverable_update);
2664 u8 status;
2665
2666 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2667 mgmt_set_discoverable_complete(hdev, status);
2668 }
2669
2670 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2671 u8 reason)
2672 {
2673 switch (conn->state) {
2674 case BT_CONNECTED:
2675 case BT_CONFIG:
2676 if (conn->type == AMP_LINK) {
2677 struct hci_cp_disconn_phy_link cp;
2678
2679 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2680 cp.reason = reason;
2681 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2682 &cp);
2683 } else {
2684 struct hci_cp_disconnect dc;
2685
2686 dc.handle = cpu_to_le16(conn->handle);
2687 dc.reason = reason;
2688 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2689 }
2690
2691 conn->state = BT_DISCONN;
2692
2693 break;
2694 case BT_CONNECT:
2695 if (conn->type == LE_LINK) {
2696 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2697 break;
2698 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2699 0, NULL);
2700 } else if (conn->type == ACL_LINK) {
2701 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2702 break;
2703 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2704 6, &conn->dst);
2705 }
2706 break;
2707 case BT_CONNECT2:
2708 if (conn->type == ACL_LINK) {
2709 struct hci_cp_reject_conn_req rej;
2710
2711 bacpy(&rej.bdaddr, &conn->dst);
2712 rej.reason = reason;
2713
2714 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2715 sizeof(rej), &rej);
2716 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2717 struct hci_cp_reject_sync_conn_req rej;
2718
2719 bacpy(&rej.bdaddr, &conn->dst);
2720
2721 /* SCO rejection has its own limited set of
2722 * allowed error values (0x0D-0x0F) which isn't
2723 * compatible with most values passed to this
2724 * function. To be safe hard-code one of the
2725 * values that's suitable for SCO.
2726 */
2727 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2728
2729 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2730 sizeof(rej), &rej);
2731 }
2732 break;
2733 default:
2734 conn->state = BT_CLOSED;
2735 break;
2736 }
2737 }
2738
2739 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2740 {
2741 if (status)
2742 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2743 }
2744
2745 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2746 {
2747 struct hci_request req;
2748 int err;
2749
2750 hci_req_init(&req, conn->hdev);
2751
2752 __hci_abort_conn(&req, conn, reason);
2753
2754 err = hci_req_run(&req, abort_conn_complete);
2755 if (err && err != -ENODATA) {
2756 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2757 return err;
2758 }
2759
2760 return 0;
2761 }
2762
2763 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2764 {
2765 hci_dev_lock(req->hdev);
2766 __hci_update_background_scan(req);
2767 hci_dev_unlock(req->hdev);
2768 return 0;
2769 }
2770
2771 static void bg_scan_update(struct work_struct *work)
2772 {
2773 struct hci_dev *hdev = container_of(work, struct hci_dev,
2774 bg_scan_update);
2775 struct hci_conn *conn;
2776 u8 status;
2777 int err;
2778
2779 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2780 if (!err)
2781 return;
2782
2783 hci_dev_lock(hdev);
2784
2785 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2786 if (conn)
2787 hci_le_conn_failed(conn, status);
2788
2789 hci_dev_unlock(hdev);
2790 }
2791
2792 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2793 {
2794 hci_req_add_le_scan_disable(req, false);
2795 return 0;
2796 }
2797
2798 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2799 {
2800 u8 length = opt;
2801 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2802 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2803 struct hci_cp_inquiry cp;
2804
2805 BT_DBG("%s", req->hdev->name);
2806
2807 hci_dev_lock(req->hdev);
2808 hci_inquiry_cache_flush(req->hdev);
2809 hci_dev_unlock(req->hdev);
2810
2811 memset(&cp, 0, sizeof(cp));
2812
2813 if (req->hdev->discovery.limited)
2814 memcpy(&cp.lap, liac, sizeof(cp.lap));
2815 else
2816 memcpy(&cp.lap, giac, sizeof(cp.lap));
2817
2818 cp.length = length;
2819
2820 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2821
2822 return 0;
2823 }
2824
2825 static void le_scan_disable_work(struct work_struct *work)
2826 {
2827 struct hci_dev *hdev = container_of(work, struct hci_dev,
2828 le_scan_disable.work);
2829 u8 status;
2830
2831 BT_DBG("%s", hdev->name);
2832
2833 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2834 return;
2835
2836 cancel_delayed_work(&hdev->le_scan_restart);
2837
2838 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2839 if (status) {
2840 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2841 status);
2842 return;
2843 }
2844
2845 hdev->discovery.scan_start = 0;
2846
2847 /* If we were running LE only scan, change discovery state. If
2848 * we were running both LE and BR/EDR inquiry simultaneously,
2849 * and BR/EDR inquiry is already finished, stop discovery,
2850 * otherwise BR/EDR inquiry will stop discovery when finished.
2851 * If we are going to resolve a remote device name, do not
2852 * change the discovery state.
2853 */
2854
2855 if (hdev->discovery.type == DISCOV_TYPE_LE)
2856 goto discov_stopped;
2857
2858 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2859 return;
2860
2861 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2862 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2863 hdev->discovery.state != DISCOVERY_RESOLVING)
2864 goto discov_stopped;
2865
2866 return;
2867 }
2868
2869 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2870 HCI_CMD_TIMEOUT, &status);
2871 if (status) {
2872 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2873 goto discov_stopped;
2874 }
2875
2876 return;
2877
2878 discov_stopped:
2879 hci_dev_lock(hdev);
2880 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2881 hci_dev_unlock(hdev);
2882 }
2883
2884 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2885 {
2886 struct hci_dev *hdev = req->hdev;
2887
2888 /* If controller is not scanning we are done. */
2889 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2890 return 0;
2891
2892 if (hdev->scanning_paused) {
2893 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2894 return 0;
2895 }
2896
2897 hci_req_add_le_scan_disable(req, false);
2898
2899 if (use_ext_scan(hdev)) {
2900 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2901
2902 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2903 ext_enable_cp.enable = LE_SCAN_ENABLE;
2904 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2905
2906 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2907 sizeof(ext_enable_cp), &ext_enable_cp);
2908 } else {
2909 struct hci_cp_le_set_scan_enable cp;
2910
2911 memset(&cp, 0, sizeof(cp));
2912 cp.enable = LE_SCAN_ENABLE;
2913 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2914 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2915 }
2916
2917 return 0;
2918 }
2919
2920 static void le_scan_restart_work(struct work_struct *work)
2921 {
2922 struct hci_dev *hdev = container_of(work, struct hci_dev,
2923 le_scan_restart.work);
2924 unsigned long timeout, duration, scan_start, now;
2925 u8 status;
2926
2927 BT_DBG("%s", hdev->name);
2928
2929 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2930 if (status) {
2931 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2932 status);
2933 return;
2934 }
2935
2936 hci_dev_lock(hdev);
2937
2938 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2939 !hdev->discovery.scan_start)
2940 goto unlock;
2941
2942 /* When the scan was started, hdev->le_scan_disable was queued to
2943 * run 'duration' after scan_start. During the scan restart this
2944 * work was canceled, so queue it again with the proper remaining
2945 * timeout to make sure the scan does not run indefinitely.
2946 */
2947 duration = hdev->discovery.scan_duration;
2948 scan_start = hdev->discovery.scan_start;
2949 now = jiffies;
2950 if (now - scan_start <= duration) {
2951 int elapsed;
2952
2953 if (now >= scan_start)
2954 elapsed = now - scan_start;
2955 else
2956 elapsed = ULONG_MAX - scan_start + now;
2957
2958 timeout = duration - elapsed;
2959 } else {
2960 timeout = 0;
2961 }
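	/* Worked example (illustrative, not in the original source): the
	 * values above are all in jiffies. If the scan duration corresponds
	 * to 10.24 s and about 3 s have elapsed since scan_start, the disable
	 * work below is re-queued for the remaining ~7.24 s; if the window
	 * has already passed, timeout is 0 and the work runs immediately. The
	 * ULONG_MAX arithmetic covers a jiffies wrap-around between
	 * scan_start and now.
	 */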
2962
2963 queue_delayed_work(hdev->req_workqueue,
2964 &hdev->le_scan_disable, timeout);
2965
2966 unlock:
2967 hci_dev_unlock(hdev);
2968 }
2969
2970 static int active_scan(struct hci_request *req, unsigned long opt)
2971 {
2972 uint16_t interval = opt;
2973 struct hci_dev *hdev = req->hdev;
2974 u8 own_addr_type;
2975 /* White list is not used for discovery */
2976 u8 filter_policy = 0x00;
2977 /* Discovery doesn't require controller address resolution */
2978 bool addr_resolv = false;
2979 int err;
2980
2981 BT_DBG("%s", hdev->name);
2982
2983 /* If controller is scanning, it means the background scanning is
2984 * running. Thus, we should temporarily stop it in order to set the
2985 * discovery scanning parameters.
2986 */
2987 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2988 hci_req_add_le_scan_disable(req, false);
2989
2990 /* All active scans will be done with either a resolvable private
2991 * address (when the privacy feature has been enabled) or a
2992 * non-resolvable private address.
2993 */
2994 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2995 &own_addr_type);
2996 if (err < 0)
2997 own_addr_type = ADDR_LE_DEV_PUBLIC;
2998
2999 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3000 hdev->le_scan_window_discovery, own_addr_type,
3001 filter_policy, addr_resolv);
3002 return 0;
3003 }
3004
3005 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3006 {
3007 int err;
3008
3009 BT_DBG("%s", req->hdev->name);
3010
3011 err = active_scan(req, opt);
3012 if (err)
3013 return err;
3014
3015 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3016 }
3017
3018 static void start_discovery(struct hci_dev *hdev, u8 *status)
3019 {
3020 unsigned long timeout;
3021
3022 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3023
3024 switch (hdev->discovery.type) {
3025 case DISCOV_TYPE_BREDR:
3026 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3027 hci_req_sync(hdev, bredr_inquiry,
3028 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3029 status);
3030 return;
3031 case DISCOV_TYPE_INTERLEAVED:
3032 /* When running simultaneous discovery, the LE scanning time
3033 * should occupy the whole discovery time since BR/EDR inquiry
3034 * and LE scanning are scheduled by the controller.
3035 *
3036 * For interleaving discovery in comparison, BR/EDR inquiry
3037 * and LE scanning are done sequentially with separate
3038 * timeouts.
3039 */
3040 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3041 &hdev->quirks)) {
3042 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3043 /* During simultaneous discovery, we double LE scan
3044 * interval. We must leave some time for the controller
3045 * to do BR/EDR inquiry.
3046 */
3047 hci_req_sync(hdev, interleaved_discov,
3048 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3049 status);
3050 break;
3051 }
3052
3053 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3054 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3055 HCI_CMD_TIMEOUT, status);
3056 break;
3057 case DISCOV_TYPE_LE:
3058 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3059 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3060 HCI_CMD_TIMEOUT, status);
3061 break;
3062 default:
3063 *status = HCI_ERROR_UNSPECIFIED;
3064 return;
3065 }
3066
3067 if (*status)
3068 return;
3069
3070 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3071
3072 /* When service discovery is used and the controller has a
3073 * strict duplicate filter, it is important to remember the
3074 * start and duration of the scan. This is required for
3075 * restarting scanning during the discovery phase.
3076 */
3077 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3078 hdev->discovery.result_filtering) {
3079 hdev->discovery.scan_start = jiffies;
3080 hdev->discovery.scan_duration = timeout;
3081 }
3082
3083 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3084 timeout);
3085 }
3086
3087 bool hci_req_stop_discovery(struct hci_request *req)
3088 {
3089 struct hci_dev *hdev = req->hdev;
3090 struct discovery_state *d = &hdev->discovery;
3091 struct hci_cp_remote_name_req_cancel cp;
3092 struct inquiry_entry *e;
3093 bool ret = false;
3094
3095 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3096
3097 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3098 if (test_bit(HCI_INQUIRY, &hdev->flags))
3099 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3100
3101 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3102 cancel_delayed_work(&hdev->le_scan_disable);
3103 hci_req_add_le_scan_disable(req, false);
3104 }
3105
3106 ret = true;
3107 } else {
3108 /* Passive scanning */
3109 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3110 hci_req_add_le_scan_disable(req, false);
3111 ret = true;
3112 }
3113 }
3114
3115 /* No further actions needed for LE-only discovery */
3116 if (d->type == DISCOV_TYPE_LE)
3117 return ret;
3118
3119 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3120 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3121 NAME_PENDING);
3122 if (!e)
3123 return ret;
3124
3125 bacpy(&cp.bdaddr, &e->data.bdaddr);
3126 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3127 &cp);
3128 ret = true;
3129 }
3130
3131 return ret;
3132 }
3133
3134 static int stop_discovery(struct hci_request *req, unsigned long opt)
3135 {
3136 hci_dev_lock(req->hdev);
3137 hci_req_stop_discovery(req);
3138 hci_dev_unlock(req->hdev);
3139
3140 return 0;
3141 }
3142
3143 static void discov_update(struct work_struct *work)
3144 {
3145 struct hci_dev *hdev = container_of(work, struct hci_dev,
3146 discov_update);
3147 u8 status = 0;
3148
3149 switch (hdev->discovery.state) {
3150 case DISCOVERY_STARTING:
3151 start_discovery(hdev, &status);
3152 mgmt_start_discovery_complete(hdev, status);
3153 if (status)
3154 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3155 else
3156 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3157 break;
3158 case DISCOVERY_STOPPING:
3159 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3160 mgmt_stop_discovery_complete(hdev, status);
3161 if (!status)
3162 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3163 break;
3164 case DISCOVERY_STOPPED:
3165 default:
3166 return;
3167 }
3168 }
3169
3170 static void discov_off(struct work_struct *work)
3171 {
3172 struct hci_dev *hdev = container_of(work, struct hci_dev,
3173 discov_off.work);
3174
3175 BT_DBG("%s", hdev->name);
3176
3177 hci_dev_lock(hdev);
3178
3179 /* When discoverable timeout triggers, then just make sure
3180 * the limited discoverable flag is cleared. Even in the case
3181 * of a timeout triggered from general discoverable, it is
3182 * safe to unconditionally clear the flag.
3183 */
3184 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3185 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3186 hdev->discov_timeout = 0;
3187
3188 hci_dev_unlock(hdev);
3189
3190 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3191 mgmt_new_settings(hdev);
3192 }
3193
3194 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3195 {
3196 struct hci_dev *hdev = req->hdev;
3197 u8 link_sec;
3198
3199 hci_dev_lock(hdev);
3200
3201 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3202 !lmp_host_ssp_capable(hdev)) {
3203 u8 mode = 0x01;
3204
3205 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3206
3207 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3208 u8 support = 0x01;
3209
3210 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3211 sizeof(support), &support);
3212 }
3213 }
3214
3215 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3216 lmp_bredr_capable(hdev)) {
3217 struct hci_cp_write_le_host_supported cp;
3218
3219 cp.le = 0x01;
3220 cp.simul = 0x00;
3221
3222 /* Check first if we already have the right
3223 * host state (host features set)
3224 */
3225 if (cp.le != lmp_host_le_capable(hdev) ||
3226 cp.simul != lmp_host_le_br_capable(hdev))
3227 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3228 sizeof(cp), &cp);
3229 }
3230
3231 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3232 /* Make sure the controller has a good default for
3233 * advertising data. This also applies to the case
3234 * where BR/EDR was toggled during the AUTO_OFF phase.
3235 */
3236 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3237 list_empty(&hdev->adv_instances)) {
3238 int err;
3239
3240 if (ext_adv_capable(hdev)) {
3241 err = __hci_req_setup_ext_adv_instance(req,
3242 0x00);
3243 if (!err)
3244 __hci_req_update_scan_rsp_data(req,
3245 0x00);
3246 } else {
3247 err = 0;
3248 __hci_req_update_adv_data(req, 0x00);
3249 __hci_req_update_scan_rsp_data(req, 0x00);
3250 }
3251
3252 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3253 if (!ext_adv_capable(hdev))
3254 __hci_req_enable_advertising(req);
3255 else if (!err)
3256 __hci_req_enable_ext_advertising(req,
3257 0x00);
3258 }
3259 } else if (!list_empty(&hdev->adv_instances)) {
3260 struct adv_info *adv_instance;
3261
3262 adv_instance = list_first_entry(&hdev->adv_instances,
3263 struct adv_info, list);
3264 __hci_req_schedule_adv_instance(req,
3265 adv_instance->instance,
3266 true);
3267 }
3268 }
3269
3270 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3271 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3272 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3273 sizeof(link_sec), &link_sec);
3274
3275 if (lmp_bredr_capable(hdev)) {
3276 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3277 __hci_req_write_fast_connectable(req, true);
3278 else
3279 __hci_req_write_fast_connectable(req, false);
3280 __hci_req_update_scan(req);
3281 __hci_req_update_class(req);
3282 __hci_req_update_name(req);
3283 __hci_req_update_eir(req);
3284 }
3285
3286 hci_dev_unlock(hdev);
3287 return 0;
3288 }
3289
3290 int __hci_req_hci_power_on(struct hci_dev *hdev)
3291 {
3292 /* Register the available SMP channels (BR/EDR and LE) only when
3293 * successfully powering on the controller. This late
3294 * registration is required so that LE SMP can clearly decide if
3295 * the public address or static address is used.
3296 */
3297 smp_register(hdev);
3298
3299 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3300 NULL);
3301 }
3302
3303 void hci_request_setup(struct hci_dev *hdev)
3304 {
3305 INIT_WORK(&hdev->discov_update, discov_update);
3306 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3307 INIT_WORK(&hdev->scan_update, scan_update_work);
3308 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3309 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3310 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3311 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3312 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3313 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3314 }
3315
3316 void hci_request_cancel_all(struct hci_dev *hdev)
3317 {
3318 hci_req_sync_cancel(hdev, ENODEV);
3319
3320 cancel_work_sync(&hdev->discov_update);
3321 cancel_work_sync(&hdev->bg_scan_update);
3322 cancel_work_sync(&hdev->scan_update);
3323 cancel_work_sync(&hdev->connectable_update);
3324 cancel_work_sync(&hdev->discoverable_update);
3325 cancel_delayed_work_sync(&hdev->discov_off);
3326 cancel_delayed_work_sync(&hdev->le_scan_disable);
3327 cancel_delayed_work_sync(&hdev->le_scan_restart);
3328
3329 if (hdev->adv_instance_timeout) {
3330 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3331 hdev->adv_instance_timeout = 0;
3332 }
3333 }
3334