1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
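/* Request status values tracked in hdev->req_status while a synchronous
 * request is in flight.
 */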
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
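/* Run a built request: splice the commands queued in req->cmd_q onto the
 * device command queue and schedule the command work. Any completion
 * callback is attached to the last command of the request.
 */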
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
101
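/* Completion handler used by the synchronous request helpers: record the
 * result (and response skb, if any) and wake up the waiter.
 */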
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114 }
115
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125 }
126
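/* Send a single HCI command and wait, interruptibly and up to the given
 * timeout, for its completion. Returns the response skb on success or an
 * ERR_PTR() on failure.
 */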
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129 {
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186 {
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266 }
267
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 int ret;
273
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* Check the state after obtaining the lock to protect the HCI_UP
277 * against any races from hci_dev_do_close when the controller
278 * gets removed.
279 */
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 else
283 ret = -ENETDOWN;
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287 }
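/* Illustrative usage only (the callback name below is hypothetical, not
 * taken from this file): build the request inside the callback and let
 * hci_req_sync() run it and wait for completion, e.g.
 *
 *	static int update_eir_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_eir(req);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, update_eir_req, 0, HCI_CMD_TIMEOUT, &status);
 */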
288
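/* Allocate an skb containing the HCI command header followed by the
 * optional parameter payload.
 */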
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291 {
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 skb_put_data(skb, param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313 }
314
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318 {
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341 bt_cb(skb)->hci.req_event = event;
342
343 skb_queue_tail(&req->cmd_q, skb);
344 }
345
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348 {
349 hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = hdev->def_page_scan_type;
371 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
372 }
373
374 acp.window = cpu_to_le16(hdev->def_page_scan_window);
375
376 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
377 __cpu_to_le16(hdev->page_scan_window) != acp.window)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
379 sizeof(acp), &acp);
380
381 if (hdev->page_scan_type != type)
382 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
383 }
384
385 /* This function controls the background scanning based on hdev->pend_le_conns
386 * list. If there are pending LE connections we start the background scanning,
387 * otherwise we stop it.
388 *
389 * This function requires the caller holds hdev->lock.
390 */
391 static void __hci_update_background_scan(struct hci_request *req)
392 {
393 struct hci_dev *hdev = req->hdev;
394
395 if (!test_bit(HCI_UP, &hdev->flags) ||
396 test_bit(HCI_INIT, &hdev->flags) ||
397 hci_dev_test_flag(hdev, HCI_SETUP) ||
398 hci_dev_test_flag(hdev, HCI_CONFIG) ||
399 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
400 hci_dev_test_flag(hdev, HCI_UNREGISTER))
401 return;
402
403 /* No point in doing scanning if LE support hasn't been enabled */
404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
405 return;
406
407 /* If discovery is active don't interfere with it */
408 if (hdev->discovery.state != DISCOVERY_STOPPED)
409 return;
410
411 /* Reset RSSI and UUID filters when starting background scanning
412 * since these filters are meant for service discovery only.
413 *
414 * The Start Discovery and Start Service Discovery operations
415 * ensure that proper values are set for the RSSI threshold and UUID
416 * filter list, so it is safe to just reset them here.
417 */
418 hci_discovery_filter_clear(hdev);
419
420 BT_DBG("%s ADV monitoring is %s", hdev->name,
421 hci_is_adv_monitoring(hdev) ? "on" : "off");
422
423 if (list_empty(&hdev->pend_le_conns) &&
424 list_empty(&hdev->pend_le_reports) &&
425 !hci_is_adv_monitoring(hdev)) {
426 /* If there are no pending LE connections, no devices
427 * to be scanned for and no ADV monitors, we should stop the
428 * background scanning.
429 */
430
431 /* If controller is not scanning we are done. */
432 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 return;
434
435 hci_req_add_le_scan_disable(req, false);
436
437 BT_DBG("%s stopping background scanning", hdev->name);
438 } else {
439 /* If there is at least one pending LE connection, we should
440 * keep the background scan running.
441 */
442
443 /* If controller is connecting, we should not start scanning
444 * since some controllers are not able to scan and connect at
445 * the same time.
446 */
447 if (hci_lookup_le_connect(hdev))
448 return;
449
450 /* If controller is currently scanning, we stop it to ensure we
451 * don't miss any advertising (due to duplicates filter).
452 */
453 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
454 hci_req_add_le_scan_disable(req, false);
455
456 hci_req_add_le_passive_scan(req);
457
458 BT_DBG("%s starting background scanning", hdev->name);
459 }
460 }
461
462 void __hci_req_update_name(struct hci_request *req)
463 {
464 struct hci_dev *hdev = req->hdev;
465 struct hci_cp_write_local_name cp;
466
467 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
468
469 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 }
471
472 #define PNP_INFO_SVCLASS_ID 0x1200
473
474 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 {
476 u8 *ptr = data, *uuids_start = NULL;
477 struct bt_uuid *uuid;
478
479 if (len < 4)
480 return ptr;
481
482 list_for_each_entry(uuid, &hdev->uuids, list) {
483 u16 uuid16;
484
485 if (uuid->size != 16)
486 continue;
487
488 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
489 if (uuid16 < 0x1100)
490 continue;
491
492 if (uuid16 == PNP_INFO_SVCLASS_ID)
493 continue;
494
495 if (!uuids_start) {
496 uuids_start = ptr;
497 uuids_start[0] = 1;
498 uuids_start[1] = EIR_UUID16_ALL;
499 ptr += 2;
500 }
501
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u16) > len) {
504 uuids_start[1] = EIR_UUID16_SOME;
505 break;
506 }
507
508 *ptr++ = (uuid16 & 0x00ff);
509 *ptr++ = (uuid16 & 0xff00) >> 8;
510 uuids_start[0] += sizeof(uuid16);
511 }
512
513 return ptr;
514 }
515
516 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
517 {
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
520
521 if (len < 6)
522 return ptr;
523
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 32)
526 continue;
527
528 if (!uuids_start) {
529 uuids_start = ptr;
530 uuids_start[0] = 1;
531 uuids_start[1] = EIR_UUID32_ALL;
532 ptr += 2;
533 }
534
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + sizeof(u32) > len) {
537 uuids_start[1] = EIR_UUID32_SOME;
538 break;
539 }
540
541 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
542 ptr += sizeof(u32);
543 uuids_start[0] += sizeof(u32);
544 }
545
546 return ptr;
547 }
548
549 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
550 {
551 u8 *ptr = data, *uuids_start = NULL;
552 struct bt_uuid *uuid;
553
554 if (len < 18)
555 return ptr;
556
557 list_for_each_entry(uuid, &hdev->uuids, list) {
558 if (uuid->size != 128)
559 continue;
560
561 if (!uuids_start) {
562 uuids_start = ptr;
563 uuids_start[0] = 1;
564 uuids_start[1] = EIR_UUID128_ALL;
565 ptr += 2;
566 }
567
568 /* Stop if not enough space to put next UUID */
569 if ((ptr - data) + 16 > len) {
570 uuids_start[1] = EIR_UUID128_SOME;
571 break;
572 }
573
574 memcpy(ptr, uuid->uuid, 16);
575 ptr += 16;
576 uuids_start[0] += 16;
577 }
578
579 return ptr;
580 }
581
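/* Build the extended inquiry response data: local name, TX power, device
 * ID and the 16/32/128-bit service UUID lists, in that order.
 */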
582 static void create_eir(struct hci_dev *hdev, u8 *data)
583 {
584 u8 *ptr = data;
585 size_t name_len;
586
587 name_len = strlen(hdev->dev_name);
588
589 if (name_len > 0) {
590 /* EIR Data type */
591 if (name_len > 48) {
592 name_len = 48;
593 ptr[1] = EIR_NAME_SHORT;
594 } else
595 ptr[1] = EIR_NAME_COMPLETE;
596
597 /* EIR Data length */
598 ptr[0] = name_len + 1;
599
600 memcpy(ptr + 2, hdev->dev_name, name_len);
601
602 ptr += (name_len + 2);
603 }
604
605 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
606 ptr[0] = 2;
607 ptr[1] = EIR_TX_POWER;
608 ptr[2] = (u8) hdev->inq_tx_power;
609
610 ptr += 3;
611 }
612
613 if (hdev->devid_source > 0) {
614 ptr[0] = 9;
615 ptr[1] = EIR_DEVICE_ID;
616
617 put_unaligned_le16(hdev->devid_source, ptr + 2);
618 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
619 put_unaligned_le16(hdev->devid_product, ptr + 6);
620 put_unaligned_le16(hdev->devid_version, ptr + 8);
621
622 ptr += 10;
623 }
624
625 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
627 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628 }
629
630 void __hci_req_update_eir(struct hci_request *req)
631 {
632 struct hci_dev *hdev = req->hdev;
633 struct hci_cp_write_eir cp;
634
635 if (!hdev_is_powered(hdev))
636 return;
637
638 if (!lmp_ext_inq_capable(hdev))
639 return;
640
641 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 return;
643
644 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 return;
646
647 memset(&cp, 0, sizeof(cp));
648
649 create_eir(hdev, cp.data);
650
651 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 return;
653
654 memcpy(hdev->eir, cp.data, sizeof(cp.data));
655
656 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657 }
658
659 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
660 {
661 struct hci_dev *hdev = req->hdev;
662
663 if (hdev->scanning_paused) {
664 bt_dev_dbg(hdev, "Scanning is paused for suspend");
665 return;
666 }
667
668 if (use_ext_scan(hdev)) {
669 struct hci_cp_le_set_ext_scan_enable cp;
670
671 memset(&cp, 0, sizeof(cp));
672 cp.enable = LE_SCAN_DISABLE;
673 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
674 &cp);
675 } else {
676 struct hci_cp_le_set_scan_enable cp;
677
678 memset(&cp, 0, sizeof(cp));
679 cp.enable = LE_SCAN_DISABLE;
680 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
681 }
682
683 /* Disable address resolution */
684 if (use_ll_privacy(hdev) &&
685 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
686 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
687 __u8 enable = 0x00;
688
689 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
690 }
691 }
692
693 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
694 u8 bdaddr_type)
695 {
696 struct hci_cp_le_del_from_white_list cp;
697
698 cp.bdaddr_type = bdaddr_type;
699 bacpy(&cp.bdaddr, bdaddr);
700
701 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
702 cp.bdaddr_type);
703 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
704
705 if (use_ll_privacy(req->hdev) &&
706 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
707 struct smp_irk *irk;
708
709 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
710 if (irk) {
711 struct hci_cp_le_del_from_resolv_list cp;
712
713 cp.bdaddr_type = bdaddr_type;
714 bacpy(&cp.bdaddr, bdaddr);
715
716 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
717 sizeof(cp), &cp);
718 }
719 }
720 }
721
722 /* Adds a connection to the white list if needed. On error, returns -1. */
723 static int add_to_white_list(struct hci_request *req,
724 struct hci_conn_params *params, u8 *num_entries,
725 bool allow_rpa)
726 {
727 struct hci_cp_le_add_to_white_list cp;
728 struct hci_dev *hdev = req->hdev;
729
730 /* Already in white list */
731 if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
732 params->addr_type))
733 return 0;
734
735 /* Select filter policy to accept all advertising */
736 if (*num_entries >= hdev->le_white_list_size)
737 return -1;
738
739 /* White list can not be used with RPAs */
740 if (!allow_rpa &&
741 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
742 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
743 return -1;
744 }
745
746 /* During suspend, only wakeable devices can be in the whitelist */
747 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
748 params->current_flags))
749 return 0;
750
751 *num_entries += 1;
752 cp.bdaddr_type = params->addr_type;
753 bacpy(&cp.bdaddr, &params->addr);
754
755 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
756 cp.bdaddr_type);
757 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
758
759 if (use_ll_privacy(hdev) &&
760 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
761 struct smp_irk *irk;
762
763 irk = hci_find_irk_by_addr(hdev, &params->addr,
764 params->addr_type);
765 if (irk) {
766 struct hci_cp_le_add_to_resolv_list cp;
767
768 cp.bdaddr_type = params->addr_type;
769 bacpy(&cp.bdaddr, &params->addr);
770 memcpy(cp.peer_irk, irk->val, 16);
771
772 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
773 memcpy(cp.local_irk, hdev->irk, 16);
774 else
775 memset(cp.local_irk, 0, 16);
776
777 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
778 sizeof(cp), &cp);
779 }
780 }
781
782 return 0;
783 }
784
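/* Synchronize the controller white list with the pending LE connection
 * and report lists. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 otherwise.
 */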
785 static u8 update_white_list(struct hci_request *req)
786 {
787 struct hci_dev *hdev = req->hdev;
788 struct hci_conn_params *params;
789 struct bdaddr_list *b;
790 u8 num_entries = 0;
791 bool pend_conn, pend_report;
792 /* We allow whitelisting even with RPAs in suspend. In the worst case,
793 * we won't be able to wake from devices that use the Privacy 1.2
794 * features. Additionally, once we support Privacy 1.2 and IRK
795 * offloading, we can update this to also check for those conditions.
796 */
797 bool allow_rpa = hdev->suspended;
798
799 if (use_ll_privacy(hdev) &&
800 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
801 allow_rpa = true;
802
803 /* Go through the current white list programmed into the
804 * controller one by one and check if that address is still
805 * in the list of pending connections or list of devices to
806 * report. If not present in either list, then queue the
807 * command to remove it from the controller.
808 */
809 list_for_each_entry(b, &hdev->le_white_list, list) {
810 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
811 &b->bdaddr,
812 b->bdaddr_type);
813 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
814 &b->bdaddr,
815 b->bdaddr_type);
816
817 /* If the device is not likely to connect or report,
818 * remove it from the whitelist.
819 */
820 if (!pend_conn && !pend_report) {
821 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
822 continue;
823 }
824
825 /* White list can not be used with RPAs */
826 if (!allow_rpa &&
827 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
828 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
829 return 0x00;
830 }
831
832 num_entries++;
833 }
834
835 /* Since all no longer valid white list entries have been
836 * removed, walk through the list of pending connections
837 * and ensure that any new device gets programmed into
838 * the controller.
839 *
840 * If the list of devices is larger than the list of
841 * available white list entries in the controller, then
842 * just abort and return a filter policy value that does not use the
843 * white list.
844 */
845 list_for_each_entry(params, &hdev->pend_le_conns, action) {
846 if (add_to_white_list(req, params, &num_entries, allow_rpa))
847 return 0x00;
848 }
849
850 /* After adding all new pending connections, walk through
851 * the list of pending reports and also add these to the
852 * white list if there is still space. Abort if space runs out.
853 */
854 list_for_each_entry(params, &hdev->pend_le_reports, action) {
855 if (add_to_white_list(req, params, &num_entries, allow_rpa))
856 return 0x00;
857 }
858
859 /* Once controller offloading of advertisement monitors is in place,
860 * the if condition should also check for MSFT extension
861 * support. If suspend is ongoing, the whitelist should be the default to
862 * prevent waking by random advertisements.
863 */
864 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
865 return 0x00;
866
867 /* Select filter policy to use white list */
868 return 0x01;
869 }
870
871 static bool scan_use_rpa(struct hci_dev *hdev)
872 {
873 return hci_dev_test_flag(hdev, HCI_PRIVACY);
874 }
875
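/* Queue the commands that program the scan parameters and enable
 * scanning, using the extended scanning commands when the controller
 * supports them.
 */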
876 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
877 u16 window, u8 own_addr_type, u8 filter_policy,
878 bool addr_resolv)
879 {
880 struct hci_dev *hdev = req->hdev;
881
882 if (hdev->scanning_paused) {
883 bt_dev_dbg(hdev, "Scanning is paused for suspend");
884 return;
885 }
886
887 if (use_ll_privacy(hdev) &&
888 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
889 addr_resolv) {
890 u8 enable = 0x01;
891
892 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
893 }
894
895 /* Use extended scanning if the Set Extended Scan Parameters and Set
896 * Extended Scan Enable commands are supported
897 */
898 if (use_ext_scan(hdev)) {
899 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
900 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
901 struct hci_cp_le_scan_phy_params *phy_params;
902 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
903 u32 plen;
904
905 ext_param_cp = (void *)data;
906 phy_params = (void *)ext_param_cp->data;
907
908 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
909 ext_param_cp->own_addr_type = own_addr_type;
910 ext_param_cp->filter_policy = filter_policy;
911
912 plen = sizeof(*ext_param_cp);
913
914 if (scan_1m(hdev) || scan_2m(hdev)) {
915 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
916
917 memset(phy_params, 0, sizeof(*phy_params));
918 phy_params->type = type;
919 phy_params->interval = cpu_to_le16(interval);
920 phy_params->window = cpu_to_le16(window);
921
922 plen += sizeof(*phy_params);
923 phy_params++;
924 }
925
926 if (scan_coded(hdev)) {
927 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
928
929 memset(phy_params, 0, sizeof(*phy_params));
930 phy_params->type = type;
931 phy_params->interval = cpu_to_le16(interval);
932 phy_params->window = cpu_to_le16(window);
933
934 plen += sizeof(*phy_params);
935 phy_params++;
936 }
937
938 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
939 plen, ext_param_cp);
940
941 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
942 ext_enable_cp.enable = LE_SCAN_ENABLE;
943 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
944
945 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
946 sizeof(ext_enable_cp), &ext_enable_cp);
947 } else {
948 struct hci_cp_le_set_scan_param param_cp;
949 struct hci_cp_le_set_scan_enable enable_cp;
950
951 memset(&param_cp, 0, sizeof(param_cp));
952 param_cp.type = type;
953 param_cp.interval = cpu_to_le16(interval);
954 param_cp.window = cpu_to_le16(window);
955 param_cp.own_address_type = own_addr_type;
956 param_cp.filter_policy = filter_policy;
957 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
958 &param_cp);
959
960 memset(&enable_cp, 0, sizeof(enable_cp));
961 enable_cp.enable = LE_SCAN_ENABLE;
962 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
963 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
964 &enable_cp);
965 }
966 }
967
968 /* Returns true if an LE connection is in the scanning state */
969 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
970 {
971 struct hci_conn_hash *h = &hdev->conn_hash;
972 struct hci_conn *c;
973
974 rcu_read_lock();
975
976 list_for_each_entry_rcu(c, &h->list, list) {
977 if (c->type == LE_LINK && c->state == BT_CONNECT &&
978 test_bit(HCI_CONN_SCANNING, &c->flags)) {
979 rcu_read_unlock();
980 return true;
981 }
982 }
983
984 rcu_read_unlock();
985
986 return false;
987 }
988
989 /* Call hci_req_add_le_scan_disable() first to disable controller-based
990 * address resolution so that the resolving list can be
991 * reconfigured.
992 */
993 void hci_req_add_le_passive_scan(struct hci_request *req)
994 {
995 struct hci_dev *hdev = req->hdev;
996 u8 own_addr_type;
997 u8 filter_policy;
998 u16 window, interval;
999 /* Background scanning should run with address resolution */
1000 bool addr_resolv = true;
1001
1002 if (hdev->scanning_paused) {
1003 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1004 return;
1005 }
1006
1007 /* Set require_privacy to false since no SCAN_REQ are sent
1008 * during passive scanning. Not using a non-resolvable address
1009 * here is important so that peer devices using direct
1010 * advertising with our address will be correctly reported
1011 * by the controller.
1012 */
1013 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1014 &own_addr_type))
1015 return;
1016
1017 /* Adding or removing entries from the white list must
1018 * happen before enabling scanning. The controller does
1019 * not allow white list modification while scanning.
1020 */
1021 filter_policy = update_white_list(req);
1022
1023 /* When the controller is using resolvable random addresses and
1024 * LE privacy is enabled, controllers that support
1025 * Extended Scanner Filter Policies can also handle
1026 * directed advertising.
1027 *
1028 * So instead of using filter policies 0x00 (no whitelist)
1029 * and 0x01 (whitelist enabled) use the new filter policies
1030 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1031 */
1032 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1033 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1034 filter_policy |= 0x02;
1035
1036 if (hdev->suspended) {
1037 window = hdev->le_scan_window_suspend;
1038 interval = hdev->le_scan_int_suspend;
1039 } else if (hci_is_le_conn_scanning(hdev)) {
1040 window = hdev->le_scan_window_connect;
1041 interval = hdev->le_scan_int_connect;
1042 } else if (hci_is_adv_monitoring(hdev)) {
1043 window = hdev->le_scan_window_adv_monitor;
1044 interval = hdev->le_scan_int_adv_monitor;
1045 } else {
1046 window = hdev->le_scan_window;
1047 interval = hdev->le_scan_interval;
1048 }
1049
1050 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1051 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1052 own_addr_type, filter_policy, addr_resolv);
1053 }
1054
1055 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1056 {
1057 struct adv_info *adv_instance;
1058
1059 /* Instance 0x00 always sets the local name */
1060 if (instance == 0x00)
1061 return 1;
1062
1063 adv_instance = hci_find_adv_instance(hdev, instance);
1064 if (!adv_instance)
1065 return 0;
1066
1067 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1068 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1069 return 1;
1070
1071 return adv_instance->scan_rsp_len;
1072 }
1073
1074 static void hci_req_clear_event_filter(struct hci_request *req)
1075 {
1076 struct hci_cp_set_event_filter f;
1077
1078 memset(&f, 0, sizeof(f));
1079 f.flt_type = HCI_FLT_CLEAR_ALL;
1080 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1081
1082 /* Update page scan state (since we may have modified it when setting
1083 * the event filter).
1084 */
1085 __hci_req_update_scan(req);
1086 }
1087
1088 static void hci_req_set_event_filter(struct hci_request *req)
1089 {
1090 struct bdaddr_list_with_flags *b;
1091 struct hci_cp_set_event_filter f;
1092 struct hci_dev *hdev = req->hdev;
1093 u8 scan = SCAN_DISABLED;
1094
1095 /* Always clear event filter when starting */
1096 hci_req_clear_event_filter(req);
1097
1098 list_for_each_entry(b, &hdev->whitelist, list) {
1099 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1100 b->current_flags))
1101 continue;
1102
1103 memset(&f, 0, sizeof(f));
1104 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1105 f.flt_type = HCI_FLT_CONN_SETUP;
1106 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1107 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1108
1109 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1110 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1111 scan = SCAN_PAGE;
1112 }
1113
1114 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1115 }
1116
1117 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1118 {
1119 /* Before changing params disable scan if enabled */
1120 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1121 hci_req_add_le_scan_disable(req, false);
1122
1123 /* Configure params and enable scanning */
1124 hci_req_add_le_passive_scan(req);
1125
1126 /* Block suspend notifier on response */
1127 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1128 }
1129
1130 static void cancel_adv_timeout(struct hci_dev *hdev)
1131 {
1132 if (hdev->adv_instance_timeout) {
1133 hdev->adv_instance_timeout = 0;
1134 cancel_delayed_work(&hdev->adv_instance_expire);
1135 }
1136 }
1137
1138 /* This function requires the caller holds hdev->lock */
1139 static void hci_suspend_adv_instances(struct hci_request *req)
1140 {
1141 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1142
1143 /* Call to disable any advertisements active on the controller.
1144 * This will succeed even if no advertisements are configured.
1145 */
1146 __hci_req_disable_advertising(req);
1147
1148 /* If we are using software rotation, pause the loop */
1149 if (!ext_adv_capable(req->hdev))
1150 cancel_adv_timeout(req->hdev);
1151 }
1152
1153 /* This function requires the caller holds hdev->lock */
1154 static void hci_resume_adv_instances(struct hci_request *req)
1155 {
1156 struct adv_info *adv;
1157
1158 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1159
1160 if (ext_adv_capable(req->hdev)) {
1161 /* Call for each tracked instance to be re-enabled */
1162 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1163 __hci_req_enable_ext_advertising(req,
1164 adv->instance);
1165 }
1166
1167 } else {
1168 /* Schedule for most recent instance to be restarted and begin
1169 * the software rotation loop
1170 */
1171 __hci_req_schedule_adv_instance(req,
1172 req->hdev->cur_adv_instance,
1173 true);
1174 }
1175 }
1176
1177 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1178 {
1179 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1180 status);
1181 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1182 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1183 wake_up(&hdev->suspend_wait_q);
1184 }
1185 }
1186
1187 /* Call with hci_dev_lock */
1188 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1189 {
1190 int old_state;
1191 struct hci_conn *conn;
1192 struct hci_request req;
1193 u8 page_scan;
1194 int disconnect_counter;
1195
1196 if (next == hdev->suspend_state) {
1197 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1198 goto done;
1199 }
1200
1201 hdev->suspend_state = next;
1202 hci_req_init(&req, hdev);
1203
1204 if (next == BT_SUSPEND_DISCONNECT) {
1205 /* Mark device as suspended */
1206 hdev->suspended = true;
1207
1208 /* Pause discovery if not already stopped */
1209 old_state = hdev->discovery.state;
1210 if (old_state != DISCOVERY_STOPPED) {
1211 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1212 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1213 queue_work(hdev->req_workqueue, &hdev->discov_update);
1214 }
1215
1216 hdev->discovery_paused = true;
1217 hdev->discovery_old_state = old_state;
1218
1219 /* Stop directed advertising */
1220 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1221 if (old_state) {
1222 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1223 cancel_delayed_work(&hdev->discov_off);
1224 queue_delayed_work(hdev->req_workqueue,
1225 &hdev->discov_off, 0);
1226 }
1227
1228 /* Pause other advertisements */
1229 if (hdev->adv_instance_cnt)
1230 hci_suspend_adv_instances(&req);
1231
1232 hdev->advertising_paused = true;
1233 hdev->advertising_old_state = old_state;
1234 /* Disable page scan */
1235 page_scan = SCAN_DISABLED;
1236 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1237
1238 /* Disable LE passive scan if enabled */
1239 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1240 hci_req_add_le_scan_disable(&req, false);
1241
1242 /* Mark task needing completion */
1243 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1244
1245 /* Prevent disconnects from causing scanning to be re-enabled */
1246 hdev->scanning_paused = true;
1247
1248 /* Run commands before disconnecting */
1249 hci_req_run(&req, suspend_req_complete);
1250
1251 disconnect_counter = 0;
1252 /* Soft disconnect everything (power off) */
1253 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1254 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1255 disconnect_counter++;
1256 }
1257
1258 if (disconnect_counter > 0) {
1259 bt_dev_dbg(hdev,
1260 "Had %d disconnects. Will wait on them",
1261 disconnect_counter);
1262 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1263 }
1264 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1265 /* Unpause to take care of updating scanning params */
1266 hdev->scanning_paused = false;
1267 /* Enable event filter for paired devices */
1268 hci_req_set_event_filter(&req);
1269 /* Enable passive scan at lower duty cycle */
1270 hci_req_config_le_suspend_scan(&req);
1271 /* Pause scan changes again. */
1272 hdev->scanning_paused = true;
1273 hci_req_run(&req, suspend_req_complete);
1274 } else {
1275 hdev->suspended = false;
1276 hdev->scanning_paused = false;
1277
1278 hci_req_clear_event_filter(&req);
1279 /* Reset passive/background scanning to normal */
1280 hci_req_config_le_suspend_scan(&req);
1281
1282 /* Unpause directed advertising */
1283 hdev->advertising_paused = false;
1284 if (hdev->advertising_old_state) {
1285 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1286 hdev->suspend_tasks);
1287 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1288 queue_work(hdev->req_workqueue,
1289 &hdev->discoverable_update);
1290 hdev->advertising_old_state = 0;
1291 }
1292
1293 /* Resume other advertisements */
1294 if (hdev->adv_instance_cnt)
1295 hci_resume_adv_instances(&req);
1296
1297 /* Unpause discovery */
1298 hdev->discovery_paused = false;
1299 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1300 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1301 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1302 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1303 queue_work(hdev->req_workqueue, &hdev->discov_update);
1304 }
1305
1306 hci_req_run(&req, suspend_req_complete);
1307 }
1308
1309 hdev->suspend_state = next;
1310
1311 done:
1312 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1313 wake_up(&hdev->suspend_wait_q);
1314 }
1315
1316 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1317 {
1318 u8 instance = hdev->cur_adv_instance;
1319 struct adv_info *adv_instance;
1320
1321 /* Instance 0x00 always sets the local name */
1322 if (instance == 0x00)
1323 return 1;
1324
1325 adv_instance = hci_find_adv_instance(hdev, instance);
1326 if (!adv_instance)
1327 return 0;
1328
1329 /* TODO: Take into account the "appearance" and "local-name" flags here.
1330 * These are currently being ignored as they are not supported.
1331 */
1332 return adv_instance->scan_rsp_len;
1333 }
1334
1335 void __hci_req_disable_advertising(struct hci_request *req)
1336 {
1337 if (ext_adv_capable(req->hdev)) {
1338 __hci_req_disable_ext_adv_instance(req, 0x00);
1339
1340 } else {
1341 u8 enable = 0x00;
1342
1343 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1344 }
1345 }
1346
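/* Return the advertising flags for the given instance. Instance 0x00
 * derives its flags from the global HCI settings; other instances use
 * the flags supplied when the instance was added.
 */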
1347 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1348 {
1349 u32 flags;
1350 struct adv_info *adv_instance;
1351
1352 if (instance == 0x00) {
1353 /* Instance 0 always manages the "Tx Power" and "Flags"
1354 * fields
1355 */
1356 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1357
1358 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1359 * corresponds to the "connectable" instance flag.
1360 */
1361 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1362 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1363
1364 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1365 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1366 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1367 flags |= MGMT_ADV_FLAG_DISCOV;
1368
1369 return flags;
1370 }
1371
1372 adv_instance = hci_find_adv_instance(hdev, instance);
1373
1374 /* Return 0 when we get an invalid instance identifier. */
1375 if (!adv_instance)
1376 return 0;
1377
1378 return adv_instance->flags;
1379 }
1380
1381 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1382 {
1383 /* If privacy is not enabled don't use RPA */
1384 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1385 return false;
1386
1387 /* If basic privacy mode is enabled use RPA */
1388 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1389 return true;
1390
1391 /* If limited privacy mode is enabled don't use RPA if we're
1392 * both discoverable and bondable.
1393 */
1394 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1395 hci_dev_test_flag(hdev, HCI_BONDABLE))
1396 return false;
1397
1398 /* We're neither bondable nor discoverable in the limited
1399 * privacy mode, therefore use RPA.
1400 */
1401 return true;
1402 }
1403
1404 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1405 {
1406 /* If there is no connection we are OK to advertise. */
1407 if (hci_conn_num(hdev, LE_LINK) == 0)
1408 return true;
1409
1410 /* Check le_states if there is any connection in slave role. */
1411 if (hdev->conn_hash.le_num_slave > 0) {
1412 /* Slave connection state and non connectable mode bit 20. */
1413 if (!connectable && !(hdev->le_states[2] & 0x10))
1414 return false;
1415
1416 /* Slave connection state and connectable mode bit 38
1417 * and scannable bit 21.
1418 */
1419 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1420 !(hdev->le_states[2] & 0x20)))
1421 return false;
1422 }
1423
1424 /* Check le_states if there is any connection in master role. */
1425 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1426 /* Master connection state and non connectable mode bit 18. */
1427 if (!connectable && !(hdev->le_states[2] & 0x02))
1428 return false;
1429
1430 /* Master connection state and connectable mode bit 35 and
1431 * scannable 19.
1432 */
1433 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1434 !(hdev->le_states[2] & 0x08)))
1435 return false;
1436 }
1437
1438 return true;
1439 }
1440
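/* Queue the commands needed to (re)enable legacy advertising with
 * parameters derived from the current advertising instance and the
 * global settings.
 */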
1441 void __hci_req_enable_advertising(struct hci_request *req)
1442 {
1443 struct hci_dev *hdev = req->hdev;
1444 struct hci_cp_le_set_adv_param cp;
1445 u8 own_addr_type, enable = 0x01;
1446 bool connectable;
1447 u16 adv_min_interval, adv_max_interval;
1448 u32 flags;
1449
1450 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1451
1452 /* If the "connectable" instance flag was not set, then choose between
1453 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1454 */
1455 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1456 mgmt_get_connectable(hdev);
1457
1458 if (!is_advertising_allowed(hdev, connectable))
1459 return;
1460
1461 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1462 __hci_req_disable_advertising(req);
1463
1464 /* Clear the HCI_LE_ADV bit temporarily so that the
1465 * hci_update_random_address knows that it's safe to go ahead
1466 * and write a new random address. The flag will be set back on
1467 * as soon as the SET_ADV_ENABLE HCI command completes.
1468 */
1469 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1470
1471 /* Set require_privacy to true only when non-connectable
1472 * advertising is used. In that case it is fine to use a
1473 * non-resolvable private address.
1474 */
1475 if (hci_update_random_address(req, !connectable,
1476 adv_use_rpa(hdev, flags),
1477 &own_addr_type) < 0)
1478 return;
1479
1480 memset(&cp, 0, sizeof(cp));
1481
1482 if (connectable) {
1483 cp.type = LE_ADV_IND;
1484
1485 adv_min_interval = hdev->le_adv_min_interval;
1486 adv_max_interval = hdev->le_adv_max_interval;
1487 } else {
1488 if (get_cur_adv_instance_scan_rsp_len(hdev))
1489 cp.type = LE_ADV_SCAN_IND;
1490 else
1491 cp.type = LE_ADV_NONCONN_IND;
1492
1493 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1494 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1495 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1496 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1497 } else {
1498 adv_min_interval = hdev->le_adv_min_interval;
1499 adv_max_interval = hdev->le_adv_max_interval;
1500 }
1501 }
1502
1503 cp.min_interval = cpu_to_le16(adv_min_interval);
1504 cp.max_interval = cpu_to_le16(adv_max_interval);
1505 cp.own_address_type = own_addr_type;
1506 cp.channel_map = hdev->le_adv_channel_map;
1507
1508 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1509
1510 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1511 }
1512
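/* Append the device name to the advertising or scan response buffer:
 * use the complete name when it fits, otherwise fall back to the short
 * or truncated name. Returns the updated data length.
 */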
1513 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1514 {
1515 size_t short_len;
1516 size_t complete_len;
1517
1518 /* no space left for name (+ NULL + type + len) */
1519 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1520 return ad_len;
1521
1522 /* use complete name if present and fits */
1523 complete_len = strlen(hdev->dev_name);
1524 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1525 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1526 hdev->dev_name, complete_len + 1);
1527
1528 /* use short name if present */
1529 short_len = strlen(hdev->short_name);
1530 if (short_len)
1531 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1532 hdev->short_name, short_len + 1);
1533
1534 /* use shortened full name if present, we already know that name
1535 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1536 */
1537 if (complete_len) {
1538 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1539
1540 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1541 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1542
1543 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1544 sizeof(name));
1545 }
1546
1547 return ad_len;
1548 }
1549
1550 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1551 {
1552 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1553 }
1554
1555 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1556 {
1557 u8 scan_rsp_len = 0;
1558
1559 if (hdev->appearance) {
1560 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1561 }
1562
1563 return append_local_name(hdev, ptr, scan_rsp_len);
1564 }
1565
1566 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1567 u8 *ptr)
1568 {
1569 struct adv_info *adv_instance;
1570 u32 instance_flags;
1571 u8 scan_rsp_len = 0;
1572
1573 adv_instance = hci_find_adv_instance(hdev, instance);
1574 if (!adv_instance)
1575 return 0;
1576
1577 instance_flags = adv_instance->flags;
1578
1579 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1580 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1581 }
1582
1583 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1584 adv_instance->scan_rsp_len);
1585
1586 scan_rsp_len += adv_instance->scan_rsp_len;
1587
1588 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1589 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1590
1591 return scan_rsp_len;
1592 }
1593
1594 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1595 {
1596 struct hci_dev *hdev = req->hdev;
1597 u8 len;
1598
1599 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1600 return;
1601
1602 if (ext_adv_capable(hdev)) {
1603 struct hci_cp_le_set_ext_scan_rsp_data cp;
1604
1605 memset(&cp, 0, sizeof(cp));
1606
1607 if (instance)
1608 len = create_instance_scan_rsp_data(hdev, instance,
1609 cp.data);
1610 else
1611 len = create_default_scan_rsp_data(hdev, cp.data);
1612
1613 if (hdev->scan_rsp_data_len == len &&
1614 !memcmp(cp.data, hdev->scan_rsp_data, len))
1615 return;
1616
1617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1618 hdev->scan_rsp_data_len = len;
1619
1620 cp.handle = instance;
1621 cp.length = len;
1622 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1623 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1624
1625 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1626 &cp);
1627 } else {
1628 struct hci_cp_le_set_scan_rsp_data cp;
1629
1630 memset(&cp, 0, sizeof(cp));
1631
1632 if (instance)
1633 len = create_instance_scan_rsp_data(hdev, instance,
1634 cp.data);
1635 else
1636 len = create_default_scan_rsp_data(hdev, cp.data);
1637
1638 if (hdev->scan_rsp_data_len == len &&
1639 !memcmp(cp.data, hdev->scan_rsp_data, len))
1640 return;
1641
1642 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1643 hdev->scan_rsp_data_len = len;
1644
1645 cp.length = len;
1646
1647 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1648 }
1649 }
1650
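/* Assemble the advertising data for an instance: the Flags field (unless
 * the instance data already carries one), the instance data itself and,
 * when requested, the TX power. Returns the total length.
 */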
1651 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1652 {
1653 struct adv_info *adv_instance = NULL;
1654 u8 ad_len = 0, flags = 0;
1655 u32 instance_flags;
1656
1657 /* Return 0 when the current instance identifier is invalid. */
1658 if (instance) {
1659 adv_instance = hci_find_adv_instance(hdev, instance);
1660 if (!adv_instance)
1661 return 0;
1662 }
1663
1664 instance_flags = get_adv_instance_flags(hdev, instance);
1665
1666 /* If the instance already has the flags set, skip adding them
1667 * again.
1668 */
1669 if (adv_instance && eir_get_data(adv_instance->adv_data,
1670 adv_instance->adv_data_len, EIR_FLAGS,
1671 NULL))
1672 goto skip_flags;
1673
1674 /* The Add Advertising command allows userspace to set both the general
1675 * and limited discoverable flags.
1676 */
1677 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1678 flags |= LE_AD_GENERAL;
1679
1680 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1681 flags |= LE_AD_LIMITED;
1682
1683 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1684 flags |= LE_AD_NO_BREDR;
1685
1686 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1687 /* If a discovery flag wasn't provided, simply use the global
1688 * settings.
1689 */
1690 if (!flags)
1691 flags |= mgmt_get_adv_discov_flags(hdev);
1692
1693 /* If flags would still be empty, then there is no need to
1694 * include the "Flags" AD field".
1695 */
1696 if (flags) {
1697 ptr[0] = 0x02;
1698 ptr[1] = EIR_FLAGS;
1699 ptr[2] = flags;
1700
1701 ad_len += 3;
1702 ptr += 3;
1703 }
1704 }
1705
1706 skip_flags:
1707 if (adv_instance) {
1708 memcpy(ptr, adv_instance->adv_data,
1709 adv_instance->adv_data_len);
1710 ad_len += adv_instance->adv_data_len;
1711 ptr += adv_instance->adv_data_len;
1712 }
1713
1714 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1715 s8 adv_tx_power;
1716
1717 if (ext_adv_capable(hdev)) {
1718 if (adv_instance)
1719 adv_tx_power = adv_instance->tx_power;
1720 else
1721 adv_tx_power = hdev->adv_tx_power;
1722 } else {
1723 adv_tx_power = hdev->adv_tx_power;
1724 }
1725
1726 /* Provide Tx Power only if we can provide a valid value for it */
1727 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1728 ptr[0] = 0x02;
1729 ptr[1] = EIR_TX_POWER;
1730 ptr[2] = (u8)adv_tx_power;
1731
1732 ad_len += 3;
1733 ptr += 3;
1734 }
1735 }
1736
1737 return ad_len;
1738 }
1739
1740 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1741 {
1742 struct hci_dev *hdev = req->hdev;
1743 u8 len;
1744
1745 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1746 return;
1747
1748 if (ext_adv_capable(hdev)) {
1749 struct hci_cp_le_set_ext_adv_data cp;
1750
1751 memset(&cp, 0, sizeof(cp));
1752
1753 len = create_instance_adv_data(hdev, instance, cp.data);
1754
1755 /* There's nothing to do if the data hasn't changed */
1756 if (hdev->adv_data_len == len &&
1757 memcmp(cp.data, hdev->adv_data, len) == 0)
1758 return;
1759
1760 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1761 hdev->adv_data_len = len;
1762
1763 cp.length = len;
1764 cp.handle = instance;
1765 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1766 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1767
1768 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1769 } else {
1770 struct hci_cp_le_set_adv_data cp;
1771
1772 memset(&cp, 0, sizeof(cp));
1773
1774 len = create_instance_adv_data(hdev, instance, cp.data);
1775
1776 /* There's nothing to do if the data hasn't changed */
1777 if (hdev->adv_data_len == len &&
1778 memcmp(cp.data, hdev->adv_data, len) == 0)
1779 return;
1780
1781 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1782 hdev->adv_data_len = len;
1783
1784 cp.length = len;
1785
1786 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1787 }
1788 }
1789
1790 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1791 {
1792 struct hci_request req;
1793
1794 hci_req_init(&req, hdev);
1795 __hci_req_update_adv_data(&req, instance);
1796
1797 return hci_req_run(&req, NULL);
1798 }
1799
1800 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1801 u16 opcode)
1802 {
1803 BT_DBG("%s status %u", hdev->name, status);
1804 }
1805
1806 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1807 {
1808 struct hci_request req;
1809 __u8 enable = 0x00;
1810
1811 if (!use_ll_privacy(hdev) &&
1812 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1813 return;
1814
1815 hci_req_init(&req, hdev);
1816
1817 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1818
1819 hci_req_run(&req, enable_addr_resolution_complete);
1820 }
1821
1822 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1823 {
1824 BT_DBG("%s status %u", hdev->name, status);
1825 }
1826
1827 void hci_req_reenable_advertising(struct hci_dev *hdev)
1828 {
1829 struct hci_request req;
1830
1831 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1832 list_empty(&hdev->adv_instances))
1833 return;
1834
1835 hci_req_init(&req, hdev);
1836
1837 if (hdev->cur_adv_instance) {
1838 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1839 true);
1840 } else {
1841 if (ext_adv_capable(hdev)) {
1842 __hci_req_start_ext_adv(&req, 0x00);
1843 } else {
1844 __hci_req_update_adv_data(&req, 0x00);
1845 __hci_req_update_scan_rsp_data(&req, 0x00);
1846 __hci_req_enable_advertising(&req);
1847 }
1848 }
1849
1850 hci_req_run(&req, adv_enable_complete);
1851 }
1852
1853 static void adv_timeout_expire(struct work_struct *work)
1854 {
1855 struct hci_dev *hdev = container_of(work, struct hci_dev,
1856 adv_instance_expire.work);
1857
1858 struct hci_request req;
1859 u8 instance;
1860
1861 BT_DBG("%s", hdev->name);
1862
1863 hci_dev_lock(hdev);
1864
1865 hdev->adv_instance_timeout = 0;
1866
1867 instance = hdev->cur_adv_instance;
1868 if (instance == 0x00)
1869 goto unlock;
1870
1871 hci_req_init(&req, hdev);
1872
1873 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1874
1875 if (list_empty(&hdev->adv_instances))
1876 __hci_req_disable_advertising(&req);
1877
1878 hci_req_run(&req, NULL);
1879
1880 unlock:
1881 hci_dev_unlock(hdev);
1882 }
1883
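/* Select the own address type and, when needed, the random address to use
 * for advertising: a resolvable private address when @use_rpa is set
 * (regenerating it if it has expired), a non-resolvable private address
 * when only @require_privacy is set, and the public address otherwise.
 * *rand_addr is left as BDADDR_ANY when no random address needs to be
 * programmed.
 */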
1884 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1885 bool use_rpa, struct adv_info *adv_instance,
1886 u8 *own_addr_type, bdaddr_t *rand_addr)
1887 {
1888 int err;
1889
1890 bacpy(rand_addr, BDADDR_ANY);
1891
1892 	/* If privacy is enabled, use a resolvable private address. If the
1893 	 * current RPA has expired, generate a new one.
1894 	 */
1895 if (use_rpa) {
1896 int to;
1897
1898 		/* If the controller supports LL Privacy, use own address
1899 		 * type 0x03 (resolvable private address).
1900 		 */
1901 if (use_ll_privacy(hdev))
1902 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1903 else
1904 *own_addr_type = ADDR_LE_DEV_RANDOM;
1905
1906 if (adv_instance) {
1907 if (!adv_instance->rpa_expired &&
1908 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1909 return 0;
1910
1911 adv_instance->rpa_expired = false;
1912 } else {
1913 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1914 !bacmp(&hdev->random_addr, &hdev->rpa))
1915 return 0;
1916 }
1917
1918 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1919 if (err < 0) {
1920 bt_dev_err(hdev, "failed to generate new RPA");
1921 return err;
1922 }
1923
1924 bacpy(rand_addr, &hdev->rpa);
1925
1926 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1927 if (adv_instance)
1928 queue_delayed_work(hdev->workqueue,
1929 &adv_instance->rpa_expired_cb, to);
1930 else
1931 queue_delayed_work(hdev->workqueue,
1932 &hdev->rpa_expired, to);
1933
1934 return 0;
1935 }
1936
1937 /* In case of required privacy without resolvable private address,
1938 	 * use a non-resolvable private address. This is useful for
1939 * non-connectable advertising.
1940 */
1941 if (require_privacy) {
1942 bdaddr_t nrpa;
1943
1944 while (true) {
1945 /* The non-resolvable private address is generated
1946 			 * from six random bytes with the two most significant
1947 * bits cleared.
1948 */
1949 get_random_bytes(&nrpa, 6);
1950 nrpa.b[5] &= 0x3f;
1951
1952 /* The non-resolvable private address shall not be
1953 * equal to the public address.
1954 */
1955 if (bacmp(&hdev->bdaddr, &nrpa))
1956 break;
1957 }
1958
1959 *own_addr_type = ADDR_LE_DEV_RANDOM;
1960 bacpy(rand_addr, &nrpa);
1961
1962 return 0;
1963 }
1964
1965 /* No privacy so use a public address. */
1966 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1967
1968 return 0;
1969 }
1970
1971 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1972 {
1973 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1974 }
1975
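/* Queue the LE Set Extended Advertising Parameters command for @instance.
 * The event properties are derived from the connectable flag and the
 * presence of scan response data, the PHYs from the MGMT secondary-PHY
 * flags, and a per-set random address command is appended afterwards when
 * an own random address has to be programmed for the set.
 */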
1976 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1977 {
1978 struct hci_cp_le_set_ext_adv_params cp;
1979 struct hci_dev *hdev = req->hdev;
1980 bool connectable;
1981 u32 flags;
1982 bdaddr_t random_addr;
1983 u8 own_addr_type;
1984 int err;
1985 struct adv_info *adv_instance;
1986 bool secondary_adv;
1987
1988 if (instance > 0) {
1989 adv_instance = hci_find_adv_instance(hdev, instance);
1990 if (!adv_instance)
1991 return -EINVAL;
1992 } else {
1993 adv_instance = NULL;
1994 }
1995
1996 flags = get_adv_instance_flags(hdev, instance);
1997
1998 /* If the "connectable" instance flag was not set, then choose between
1999 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2000 */
2001 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2002 mgmt_get_connectable(hdev);
2003
2004 if (!is_advertising_allowed(hdev, connectable))
2005 return -EPERM;
2006
2007 /* Set require_privacy to true only when non-connectable
2008 * advertising is used. In that case it is fine to use a
2009 * non-resolvable private address.
2010 */
2011 err = hci_get_random_address(hdev, !connectable,
2012 adv_use_rpa(hdev, flags), adv_instance,
2013 &own_addr_type, &random_addr);
2014 if (err < 0)
2015 return err;
2016
2017 memset(&cp, 0, sizeof(cp));
2018
2019 	/* In the extended adv parameters the interval is 3 octets wide */
2020 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2021 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2022
2023 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2024
2025 if (connectable) {
2026 if (secondary_adv)
2027 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2028 else
2029 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2030 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2031 if (secondary_adv)
2032 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2033 else
2034 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2035 } else {
2036 if (secondary_adv)
2037 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2038 else
2039 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2040 }
2041
2042 cp.own_addr_type = own_addr_type;
2043 cp.channel_map = hdev->le_adv_channel_map;
2044 cp.tx_power = 127;
2045 cp.handle = instance;
2046
2047 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2048 cp.primary_phy = HCI_ADV_PHY_1M;
2049 cp.secondary_phy = HCI_ADV_PHY_2M;
2050 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2051 cp.primary_phy = HCI_ADV_PHY_CODED;
2052 cp.secondary_phy = HCI_ADV_PHY_CODED;
2053 } else {
2054 /* In all other cases use 1M */
2055 cp.primary_phy = HCI_ADV_PHY_1M;
2056 cp.secondary_phy = HCI_ADV_PHY_1M;
2057 }
2058
2059 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2060
2061 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2062 bacmp(&random_addr, BDADDR_ANY)) {
2063 struct hci_cp_le_set_adv_set_rand_addr cp;
2064
2065 		/* Check if the random address needs to be updated */
2066 if (adv_instance) {
2067 if (!bacmp(&random_addr, &adv_instance->random_addr))
2068 return 0;
2069 } else {
2070 if (!bacmp(&random_addr, &hdev->random_addr))
2071 return 0;
2072 }
2073
2074 memset(&cp, 0, sizeof(cp));
2075
2076 cp.handle = instance;
2077 bacpy(&cp.bdaddr, &random_addr);
2078
2079 hci_req_add(req,
2080 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2081 sizeof(cp), &cp);
2082 }
2083
2084 return 0;
2085 }
2086
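/* Enable a single advertising set. The command payload is one
 * hci_cp_le_set_ext_adv_enable header followed by one hci_cp_ext_adv_set
 * entry; the per-set duration is expressed in 10 ms units. Assuming
 * adv_instance->timeout is in seconds (as the msecs_to_jiffies() users in
 * this file suggest), a 5 second timeout becomes 5 * 1000 / 10 = 500.
 */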
2087 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2088 {
2089 struct hci_dev *hdev = req->hdev;
2090 struct hci_cp_le_set_ext_adv_enable *cp;
2091 struct hci_cp_ext_adv_set *adv_set;
2092 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2093 struct adv_info *adv_instance;
2094
2095 if (instance > 0) {
2096 adv_instance = hci_find_adv_instance(hdev, instance);
2097 if (!adv_instance)
2098 return -EINVAL;
2099 } else {
2100 adv_instance = NULL;
2101 }
2102
2103 cp = (void *) data;
2104 adv_set = (void *) cp->data;
2105
2106 memset(cp, 0, sizeof(*cp));
2107
2108 cp->enable = 0x01;
2109 cp->num_of_sets = 0x01;
2110
2111 memset(adv_set, 0, sizeof(*adv_set));
2112
2113 adv_set->handle = instance;
2114
2115 /* Set duration per instance since controller is responsible for
2116 * scheduling it.
2117 */
2118 if (adv_instance && adv_instance->timeout) {
2119 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2120
2121 /* Time = N * 10 ms */
2122 adv_set->duration = cpu_to_le16(duration / 10);
2123 }
2124
2125 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2126 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2127 data);
2128
2129 return 0;
2130 }
2131
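/* Disable one advertising set, or all of them when @instance is 0x00.
 * In the latter case num_of_sets is 0 and the set entry is not included
 * in the command payload at all.
 */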
2132 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2133 {
2134 struct hci_dev *hdev = req->hdev;
2135 struct hci_cp_le_set_ext_adv_enable *cp;
2136 struct hci_cp_ext_adv_set *adv_set;
2137 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2138 u8 req_size;
2139
2140 /* If request specifies an instance that doesn't exist, fail */
2141 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2142 return -EINVAL;
2143
2144 memset(data, 0, sizeof(data));
2145
2146 cp = (void *)data;
2147 adv_set = (void *)cp->data;
2148
2149 /* Instance 0x00 indicates all advertising instances will be disabled */
2150 cp->num_of_sets = !!instance;
2151 cp->enable = 0x00;
2152
2153 adv_set->handle = instance;
2154
2155 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2156 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2157
2158 return 0;
2159 }
2160
2161 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2162 {
2163 struct hci_dev *hdev = req->hdev;
2164
2165 /* If request specifies an instance that doesn't exist, fail */
2166 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2167 return -EINVAL;
2168
2169 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2170
2171 return 0;
2172 }
2173
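/* (Re)start extended advertising for @instance: disable the set first if
 * the controller already knows about it, re-program the advertising
 * parameters and scan response data, then enable the set again.
 */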
2174 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2175 {
2176 struct hci_dev *hdev = req->hdev;
2177 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2178 int err;
2179
2180 	/* If the instance isn't pending, the controller already knows about
2181 	 * it and it is safe to disable it first.
2182 	 */
2183 if (adv_instance && !adv_instance->pending)
2184 __hci_req_disable_ext_adv_instance(req, instance);
2185
2186 err = __hci_req_setup_ext_adv_instance(req, instance);
2187 if (err < 0)
2188 return err;
2189
2190 __hci_req_update_scan_rsp_data(req, instance);
2191 __hci_req_enable_ext_advertising(req, instance);
2192
2193 return 0;
2194 }
2195
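/* Schedule advertising for @instance. The effective timeout is the
 * instance duration, clamped to its remaining lifetime. Only legacy
 * advertising arms the adv_instance_expire delayed work; with extended
 * advertising the controller enforces the duration itself (see
 * __hci_req_enable_ext_advertising() above).
 */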
2196 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2197 bool force)
2198 {
2199 struct hci_dev *hdev = req->hdev;
2200 struct adv_info *adv_instance = NULL;
2201 u16 timeout;
2202
2203 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2204 list_empty(&hdev->adv_instances))
2205 return -EPERM;
2206
2207 if (hdev->adv_instance_timeout)
2208 return -EBUSY;
2209
2210 adv_instance = hci_find_adv_instance(hdev, instance);
2211 if (!adv_instance)
2212 return -ENOENT;
2213
2214 /* A zero timeout means unlimited advertising. As long as there is
2215 * only one instance, duration should be ignored. We still set a timeout
2216 * in case further instances are being added later on.
2217 *
2218 * If the remaining lifetime of the instance is more than the duration
2219 * then the timeout corresponds to the duration, otherwise it will be
2220 * reduced to the remaining instance lifetime.
2221 */
2222 if (adv_instance->timeout == 0 ||
2223 adv_instance->duration <= adv_instance->remaining_time)
2224 timeout = adv_instance->duration;
2225 else
2226 timeout = adv_instance->remaining_time;
2227
2228 /* The remaining time is being reduced unless the instance is being
2229 * advertised without time limit.
2230 */
2231 if (adv_instance->timeout)
2232 adv_instance->remaining_time =
2233 adv_instance->remaining_time - timeout;
2234
2235 /* Only use work for scheduling instances with legacy advertising */
2236 if (!ext_adv_capable(hdev)) {
2237 hdev->adv_instance_timeout = timeout;
2238 queue_delayed_work(hdev->req_workqueue,
2239 &hdev->adv_instance_expire,
2240 msecs_to_jiffies(timeout * 1000));
2241 }
2242
2243 /* If we're just re-scheduling the same instance again then do not
2244 * execute any HCI commands. This happens when a single instance is
2245 * being advertised.
2246 */
2247 if (!force && hdev->cur_adv_instance == instance &&
2248 hci_dev_test_flag(hdev, HCI_LE_ADV))
2249 return 0;
2250
2251 hdev->cur_adv_instance = instance;
2252 if (ext_adv_capable(hdev)) {
2253 __hci_req_start_ext_adv(req, instance);
2254 } else {
2255 __hci_req_update_adv_data(req, instance);
2256 __hci_req_update_scan_rsp_data(req, instance);
2257 __hci_req_enable_advertising(req);
2258 }
2259
2260 return 0;
2261 }
2262
2263 /* For a single instance:
2264 * - force == true: The instance will be removed even when its remaining
2265 * lifetime is not zero.
2266 * - force == false: the instance will be deactivated but kept stored unless
2267 * the remaining lifetime is zero.
2268 *
2269 * For instance == 0x00:
2270 * - force == true: All instances will be removed regardless of their timeout
2271 * setting.
2272 * - force == false: Only instances that have a timeout will be removed.
2273 */
2274 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2275 struct hci_request *req, u8 instance,
2276 bool force)
2277 {
2278 struct adv_info *adv_instance, *n, *next_instance = NULL;
2279 int err;
2280 u8 rem_inst;
2281
2282 /* Cancel any timeout concerning the removed instance(s). */
2283 if (!instance || hdev->cur_adv_instance == instance)
2284 cancel_adv_timeout(hdev);
2285
2286 /* Get the next instance to advertise BEFORE we remove
2287 * the current one. This can be the same instance again
2288 * if there is only one instance.
2289 */
2290 if (instance && hdev->cur_adv_instance == instance)
2291 next_instance = hci_get_next_instance(hdev, instance);
2292
2293 if (instance == 0x00) {
2294 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2295 list) {
2296 if (!(force || adv_instance->timeout))
2297 continue;
2298
2299 rem_inst = adv_instance->instance;
2300 err = hci_remove_adv_instance(hdev, rem_inst);
2301 if (!err)
2302 mgmt_advertising_removed(sk, hdev, rem_inst);
2303 }
2304 } else {
2305 adv_instance = hci_find_adv_instance(hdev, instance);
2306
2307 if (force || (adv_instance && adv_instance->timeout &&
2308 !adv_instance->remaining_time)) {
2309 /* Don't advertise a removed instance. */
2310 if (next_instance &&
2311 next_instance->instance == instance)
2312 next_instance = NULL;
2313
2314 err = hci_remove_adv_instance(hdev, instance);
2315 if (!err)
2316 mgmt_advertising_removed(sk, hdev, instance);
2317 }
2318 }
2319
2320 if (!req || !hdev_is_powered(hdev) ||
2321 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2322 return;
2323
2324 if (next_instance && !ext_adv_capable(hdev))
2325 __hci_req_schedule_adv_instance(req, next_instance->instance,
2326 false);
2327 }
2328
2329 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2330 {
2331 struct hci_dev *hdev = req->hdev;
2332
2333 /* If we're advertising or initiating an LE connection we can't
2334 * go ahead and change the random address at this time. This is
2335 * because the eventual initiator address used for the
2336 * subsequently created connection will be undefined (some
2337 * controllers use the new address and others the one we had
2338 * when the operation started).
2339 *
2340 * In this kind of scenario skip the update and let the random
2341 * address be updated at the next cycle.
2342 */
2343 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2344 hci_lookup_le_connect(hdev)) {
2345 BT_DBG("Deferring random address update");
2346 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2347 return;
2348 }
2349
2350 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2351 }
2352
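/* Decide which own address type to use for scanning or initiating and
 * queue the commands needed to program it. A typical caller pattern, as
 * used by active_scan() later in this file:
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */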
2353 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2354 bool use_rpa, u8 *own_addr_type)
2355 {
2356 struct hci_dev *hdev = req->hdev;
2357 int err;
2358
2359 	/* If privacy is enabled, use a resolvable private address. If the
2360 	 * current RPA has expired or something other than the current RPA
2361 	 * is in use, generate a new one.
2362 	 */
2363 if (use_rpa) {
2364 int to;
2365
2366 		/* If the controller supports LL Privacy, use own address
2367 		 * type 0x03 (resolvable private address).
2368 		 */
2369 if (use_ll_privacy(hdev))
2370 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2371 else
2372 *own_addr_type = ADDR_LE_DEV_RANDOM;
2373
2374 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2375 !bacmp(&hdev->random_addr, &hdev->rpa))
2376 return 0;
2377
2378 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2379 if (err < 0) {
2380 bt_dev_err(hdev, "failed to generate new RPA");
2381 return err;
2382 }
2383
2384 set_random_addr(req, &hdev->rpa);
2385
2386 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2387 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2388
2389 return 0;
2390 }
2391
2392 /* In case of required privacy without resolvable private address,
2393 	 * use a non-resolvable private address. This is useful for active
2394 * scanning and non-connectable advertising.
2395 */
2396 if (require_privacy) {
2397 bdaddr_t nrpa;
2398
2399 while (true) {
2400 /* The non-resolvable private address is generated
2401 			 * from six random bytes with the two most significant
2402 * bits cleared.
2403 */
2404 get_random_bytes(&nrpa, 6);
2405 nrpa.b[5] &= 0x3f;
2406
2407 /* The non-resolvable private address shall not be
2408 * equal to the public address.
2409 */
2410 if (bacmp(&hdev->bdaddr, &nrpa))
2411 break;
2412 }
2413
2414 *own_addr_type = ADDR_LE_DEV_RANDOM;
2415 set_random_addr(req, &nrpa);
2416 return 0;
2417 }
2418
2419 /* If forcing static address is in use or there is no public
2420 * address use the static address as random address (but skip
2421 * the HCI command if the current random address is already the
2422 	 * static one).
2423 *
2424 * In case BR/EDR has been disabled on a dual-mode controller
2425 * and a static address has been configured, then use that
2426 * address instead of the public BR/EDR address.
2427 */
2428 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2429 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2430 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2431 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2432 *own_addr_type = ADDR_LE_DEV_RANDOM;
2433 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2434 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2435 &hdev->static_addr);
2436 return 0;
2437 }
2438
2439 /* Neither privacy nor static address is being used so use a
2440 * public address.
2441 */
2442 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2443
2444 return 0;
2445 }
2446
2447 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2448 {
2449 struct bdaddr_list *b;
2450
2451 list_for_each_entry(b, &hdev->whitelist, list) {
2452 struct hci_conn *conn;
2453
2454 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2455 if (!conn)
2456 return true;
2457
2458 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2459 return true;
2460 }
2461
2462 return false;
2463 }
2464
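/* Recompute the BR/EDR page/inquiry scan setting from the connectable and
 * discoverable flags plus any disconnected whitelist entries, and queue
 * Write Scan Enable only when it differs from the current HCI_PSCAN and
 * HCI_ISCAN state.
 */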
2465 void __hci_req_update_scan(struct hci_request *req)
2466 {
2467 struct hci_dev *hdev = req->hdev;
2468 u8 scan;
2469
2470 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2471 return;
2472
2473 if (!hdev_is_powered(hdev))
2474 return;
2475
2476 if (mgmt_powering_down(hdev))
2477 return;
2478
2479 if (hdev->scanning_paused)
2480 return;
2481
2482 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2483 disconnected_whitelist_entries(hdev))
2484 scan = SCAN_PAGE;
2485 else
2486 scan = SCAN_DISABLED;
2487
2488 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2489 scan |= SCAN_INQUIRY;
2490
2491 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2492 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2493 return;
2494
2495 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2496 }
2497
2498 static int update_scan(struct hci_request *req, unsigned long opt)
2499 {
2500 hci_dev_lock(req->hdev);
2501 __hci_req_update_scan(req);
2502 hci_dev_unlock(req->hdev);
2503 return 0;
2504 }
2505
2506 static void scan_update_work(struct work_struct *work)
2507 {
2508 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2509
2510 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2511 }
2512
2513 static int connectable_update(struct hci_request *req, unsigned long opt)
2514 {
2515 struct hci_dev *hdev = req->hdev;
2516
2517 hci_dev_lock(hdev);
2518
2519 __hci_req_update_scan(req);
2520
2521 /* If BR/EDR is not enabled and we disable advertising as a
2522 * by-product of disabling connectable, we need to update the
2523 * advertising flags.
2524 */
2525 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2526 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2527
2528 /* Update the advertising parameters if necessary */
2529 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2530 !list_empty(&hdev->adv_instances)) {
2531 if (ext_adv_capable(hdev))
2532 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2533 else
2534 __hci_req_enable_advertising(req);
2535 }
2536
2537 __hci_update_background_scan(req);
2538
2539 hci_dev_unlock(hdev);
2540
2541 return 0;
2542 }
2543
2544 static void connectable_update_work(struct work_struct *work)
2545 {
2546 struct hci_dev *hdev = container_of(work, struct hci_dev,
2547 connectable_update);
2548 u8 status;
2549
2550 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2551 mgmt_set_connectable_complete(hdev, status);
2552 }
2553
2554 static u8 get_service_classes(struct hci_dev *hdev)
2555 {
2556 struct bt_uuid *uuid;
2557 u8 val = 0;
2558
2559 list_for_each_entry(uuid, &hdev->uuids, list)
2560 val |= uuid->svc_hint;
2561
2562 return val;
2563 }
2564
2565 void __hci_req_update_class(struct hci_request *req)
2566 {
2567 struct hci_dev *hdev = req->hdev;
2568 u8 cod[3];
2569
2570 BT_DBG("%s", hdev->name);
2571
2572 if (!hdev_is_powered(hdev))
2573 return;
2574
2575 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2576 return;
2577
2578 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2579 return;
2580
2581 cod[0] = hdev->minor_class;
2582 cod[1] = hdev->major_class;
2583 cod[2] = get_service_classes(hdev);
2584
2585 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2586 cod[1] |= 0x20;
2587
2588 if (memcmp(cod, hdev->dev_class, 3) == 0)
2589 return;
2590
2591 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2592 }
2593
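/* Program the current Inquiry Access Codes. The LAPs are written LSB
 * first: 0x00 0x8b 0x9e is the Limited Inquiry Access Code (0x9e8b00) and
 * 0x33 0x8b 0x9e is the General Inquiry Access Code (0x9e8b33).
 */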
2594 static void write_iac(struct hci_request *req)
2595 {
2596 struct hci_dev *hdev = req->hdev;
2597 struct hci_cp_write_current_iac_lap cp;
2598
2599 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2600 return;
2601
2602 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2603 /* Limited discoverable mode */
2604 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2605 cp.iac_lap[0] = 0x00; /* LIAC */
2606 cp.iac_lap[1] = 0x8b;
2607 cp.iac_lap[2] = 0x9e;
2608 cp.iac_lap[3] = 0x33; /* GIAC */
2609 cp.iac_lap[4] = 0x8b;
2610 cp.iac_lap[5] = 0x9e;
2611 } else {
2612 /* General discoverable mode */
2613 cp.num_iac = 1;
2614 cp.iac_lap[0] = 0x33; /* GIAC */
2615 cp.iac_lap[1] = 0x8b;
2616 cp.iac_lap[2] = 0x9e;
2617 }
2618
2619 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2620 (cp.num_iac * 3) + 1, &cp);
2621 }
2622
2623 static int discoverable_update(struct hci_request *req, unsigned long opt)
2624 {
2625 struct hci_dev *hdev = req->hdev;
2626
2627 hci_dev_lock(hdev);
2628
2629 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2630 write_iac(req);
2631 __hci_req_update_scan(req);
2632 __hci_req_update_class(req);
2633 }
2634
2635 /* Advertising instances don't use the global discoverable setting, so
2636 * only update AD if advertising was enabled using Set Advertising.
2637 */
2638 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2639 __hci_req_update_adv_data(req, 0x00);
2640
2641 /* Discoverable mode affects the local advertising
2642 * address in limited privacy mode.
2643 */
2644 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2645 if (ext_adv_capable(hdev))
2646 __hci_req_start_ext_adv(req, 0x00);
2647 else
2648 __hci_req_enable_advertising(req);
2649 }
2650 }
2651
2652 hci_dev_unlock(hdev);
2653
2654 return 0;
2655 }
2656
2657 static void discoverable_update_work(struct work_struct *work)
2658 {
2659 struct hci_dev *hdev = container_of(work, struct hci_dev,
2660 discoverable_update);
2661 u8 status;
2662
2663 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2664 mgmt_set_discoverable_complete(hdev, status);
2665 }
2666
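/* Queue the command needed to tear down @conn based on its current state:
 * disconnect when connected, cancel the pending connection creation when
 * still connecting, or reject an incoming request. A minimal usage sketch,
 * mirroring hci_abort_conn() below:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, conn->hdev);
 *	__hci_abort_conn(&req, conn, reason);
 *	hci_req_run(&req, abort_conn_complete);
 */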
2667 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2668 u8 reason)
2669 {
2670 switch (conn->state) {
2671 case BT_CONNECTED:
2672 case BT_CONFIG:
2673 if (conn->type == AMP_LINK) {
2674 struct hci_cp_disconn_phy_link cp;
2675
2676 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2677 cp.reason = reason;
2678 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2679 &cp);
2680 } else {
2681 struct hci_cp_disconnect dc;
2682
2683 dc.handle = cpu_to_le16(conn->handle);
2684 dc.reason = reason;
2685 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2686 }
2687
2688 conn->state = BT_DISCONN;
2689
2690 break;
2691 case BT_CONNECT:
2692 if (conn->type == LE_LINK) {
2693 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2694 break;
2695 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2696 0, NULL);
2697 } else if (conn->type == ACL_LINK) {
2698 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2699 break;
2700 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2701 6, &conn->dst);
2702 }
2703 break;
2704 case BT_CONNECT2:
2705 if (conn->type == ACL_LINK) {
2706 struct hci_cp_reject_conn_req rej;
2707
2708 bacpy(&rej.bdaddr, &conn->dst);
2709 rej.reason = reason;
2710
2711 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2712 sizeof(rej), &rej);
2713 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2714 struct hci_cp_reject_sync_conn_req rej;
2715
2716 bacpy(&rej.bdaddr, &conn->dst);
2717
2718 /* SCO rejection has its own limited set of
2719 * allowed error values (0x0D-0x0F) which isn't
2720 * compatible with most values passed to this
2721 * function. To be safe hard-code one of the
2722 * values that's suitable for SCO.
2723 */
2724 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2725
2726 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2727 sizeof(rej), &rej);
2728 }
2729 break;
2730 default:
2731 conn->state = BT_CLOSED;
2732 break;
2733 }
2734 }
2735
2736 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2737 {
2738 if (status)
2739 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2740 }
2741
2742 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2743 {
2744 struct hci_request req;
2745 int err;
2746
2747 hci_req_init(&req, conn->hdev);
2748
2749 __hci_abort_conn(&req, conn, reason);
2750
2751 err = hci_req_run(&req, abort_conn_complete);
2752 if (err && err != -ENODATA) {
2753 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2754 return err;
2755 }
2756
2757 return 0;
2758 }
2759
2760 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2761 {
2762 hci_dev_lock(req->hdev);
2763 __hci_update_background_scan(req);
2764 hci_dev_unlock(req->hdev);
2765 return 0;
2766 }
2767
2768 static void bg_scan_update(struct work_struct *work)
2769 {
2770 struct hci_dev *hdev = container_of(work, struct hci_dev,
2771 bg_scan_update);
2772 struct hci_conn *conn;
2773 u8 status;
2774 int err;
2775
2776 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2777 if (!err)
2778 return;
2779
2780 hci_dev_lock(hdev);
2781
2782 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2783 if (conn)
2784 hci_le_conn_failed(conn, status);
2785
2786 hci_dev_unlock(hdev);
2787 }
2788
2789 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2790 {
2791 hci_req_add_le_scan_disable(req, false);
2792 return 0;
2793 }
2794
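/* Queue a BR/EDR inquiry using the LIAC when limited discovery was
 * requested and the GIAC otherwise. @opt carries the Inquiry_Length
 * parameter, which the HCI specification expresses in units of 1.28 s.
 */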
2795 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2796 {
2797 u8 length = opt;
2798 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2799 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2800 struct hci_cp_inquiry cp;
2801
2802 BT_DBG("%s", req->hdev->name);
2803
2804 hci_dev_lock(req->hdev);
2805 hci_inquiry_cache_flush(req->hdev);
2806 hci_dev_unlock(req->hdev);
2807
2808 memset(&cp, 0, sizeof(cp));
2809
2810 if (req->hdev->discovery.limited)
2811 memcpy(&cp.lap, liac, sizeof(cp.lap));
2812 else
2813 memcpy(&cp.lap, giac, sizeof(cp.lap));
2814
2815 cp.length = length;
2816
2817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2818
2819 return 0;
2820 }
2821
2822 static void le_scan_disable_work(struct work_struct *work)
2823 {
2824 struct hci_dev *hdev = container_of(work, struct hci_dev,
2825 le_scan_disable.work);
2826 u8 status;
2827
2828 BT_DBG("%s", hdev->name);
2829
2830 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2831 return;
2832
2833 cancel_delayed_work(&hdev->le_scan_restart);
2834
2835 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2836 if (status) {
2837 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2838 status);
2839 return;
2840 }
2841
2842 hdev->discovery.scan_start = 0;
2843
2844 /* If we were running LE only scan, change discovery state. If
2845 * we were running both LE and BR/EDR inquiry simultaneously,
2846 * and BR/EDR inquiry is already finished, stop discovery,
2847 * otherwise BR/EDR inquiry will stop discovery when finished.
2848 	 * If we are still resolving a remote device name, do not change
2849 	 * the discovery state.
2850 */
2851
2852 if (hdev->discovery.type == DISCOV_TYPE_LE)
2853 goto discov_stopped;
2854
2855 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2856 return;
2857
2858 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2859 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2860 hdev->discovery.state != DISCOVERY_RESOLVING)
2861 goto discov_stopped;
2862
2863 return;
2864 }
2865
2866 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2867 HCI_CMD_TIMEOUT, &status);
2868 if (status) {
2869 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2870 goto discov_stopped;
2871 }
2872
2873 return;
2874
2875 discov_stopped:
2876 hci_dev_lock(hdev);
2877 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2878 hci_dev_unlock(hdev);
2879 }
2880
2881 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2882 {
2883 struct hci_dev *hdev = req->hdev;
2884
2885 /* If controller is not scanning we are done. */
2886 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2887 return 0;
2888
2889 if (hdev->scanning_paused) {
2890 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2891 return 0;
2892 }
2893
2894 hci_req_add_le_scan_disable(req, false);
2895
2896 if (use_ext_scan(hdev)) {
2897 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2898
2899 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2900 ext_enable_cp.enable = LE_SCAN_ENABLE;
2901 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2902
2903 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2904 sizeof(ext_enable_cp), &ext_enable_cp);
2905 } else {
2906 struct hci_cp_le_set_scan_enable cp;
2907
2908 memset(&cp, 0, sizeof(cp));
2909 cp.enable = LE_SCAN_ENABLE;
2910 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2911 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2912 }
2913
2914 return 0;
2915 }
2916
2917 static void le_scan_restart_work(struct work_struct *work)
2918 {
2919 struct hci_dev *hdev = container_of(work, struct hci_dev,
2920 le_scan_restart.work);
2921 unsigned long timeout, duration, scan_start, now;
2922 u8 status;
2923
2924 BT_DBG("%s", hdev->name);
2925
2926 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2927 if (status) {
2928 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2929 status);
2930 return;
2931 }
2932
2933 hci_dev_lock(hdev);
2934
2935 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2936 !hdev->discovery.scan_start)
2937 goto unlock;
2938
2939 	/* When the scan was started, hdev->le_scan_disable was queued to run
2940 	 * once the discovery duration had elapsed from scan_start. The scan
2941 	 * restart cancelled that work, so queue it again with the remaining
2942 	 * timeout to make sure the scan does not run indefinitely.
2943 	 */
2944 duration = hdev->discovery.scan_duration;
2945 scan_start = hdev->discovery.scan_start;
2946 now = jiffies;
2947 if (now - scan_start <= duration) {
2948 int elapsed;
2949
2950 if (now >= scan_start)
2951 elapsed = now - scan_start;
2952 else
2953 elapsed = ULONG_MAX - scan_start + now;
2954
2955 timeout = duration - elapsed;
2956 } else {
2957 timeout = 0;
2958 }
2959
2960 queue_delayed_work(hdev->req_workqueue,
2961 &hdev->le_scan_disable, timeout);
2962
2963 unlock:
2964 hci_dev_unlock(hdev);
2965 }
2966
2967 static int active_scan(struct hci_request *req, unsigned long opt)
2968 {
2969 uint16_t interval = opt;
2970 struct hci_dev *hdev = req->hdev;
2971 u8 own_addr_type;
2972 /* White list is not used for discovery */
2973 u8 filter_policy = 0x00;
2974 /* Discovery doesn't require controller address resolution */
2975 bool addr_resolv = false;
2976 int err;
2977
2978 BT_DBG("%s", hdev->name);
2979
2980 /* If controller is scanning, it means the background scanning is
2981 * running. Thus, we should temporarily stop it in order to set the
2982 * discovery scanning parameters.
2983 */
2984 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2985 hci_req_add_le_scan_disable(req, false);
2986
2987 /* All active scans will be done with either a resolvable private
2988 * address (when privacy feature has been enabled) or non-resolvable
2989 * private address.
2990 */
2991 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2992 &own_addr_type);
2993 if (err < 0)
2994 own_addr_type = ADDR_LE_DEV_PUBLIC;
2995
2996 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2997 hdev->le_scan_window_discovery, own_addr_type,
2998 filter_policy, addr_resolv);
2999 return 0;
3000 }
3001
3002 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3003 {
3004 int err;
3005
3006 BT_DBG("%s", req->hdev->name);
3007
3008 err = active_scan(req, opt);
3009 if (err)
3010 return err;
3011
3012 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3013 }
3014
3015 static void start_discovery(struct hci_dev *hdev, u8 *status)
3016 {
3017 unsigned long timeout;
3018
3019 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3020
3021 switch (hdev->discovery.type) {
3022 case DISCOV_TYPE_BREDR:
3023 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3024 hci_req_sync(hdev, bredr_inquiry,
3025 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3026 status);
3027 return;
3028 case DISCOV_TYPE_INTERLEAVED:
3029 /* When running simultaneous discovery, the LE scanning time
3030 		 * should occupy the whole discovery time since BR/EDR inquiry
3031 * and LE scanning are scheduled by the controller.
3032 *
3033 * For interleaving discovery in comparison, BR/EDR inquiry
3034 * and LE scanning are done sequentially with separate
3035 * timeouts.
3036 */
3037 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3038 &hdev->quirks)) {
3039 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3040 			/* During simultaneous discovery, we double the LE scan
3041 			 * interval. We must leave some time for the controller
3042 			 * to do BR/EDR inquiry.
3043 			 */
3044 hci_req_sync(hdev, interleaved_discov,
3045 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3046 status);
3047 break;
3048 }
3049
3050 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3051 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3052 HCI_CMD_TIMEOUT, status);
3053 break;
3054 case DISCOV_TYPE_LE:
3055 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3056 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3057 HCI_CMD_TIMEOUT, status);
3058 break;
3059 default:
3060 *status = HCI_ERROR_UNSPECIFIED;
3061 return;
3062 }
3063
3064 if (*status)
3065 return;
3066
3067 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3068
3069 /* When service discovery is used and the controller has a
3070 * strict duplicate filter, it is important to remember the
3071 * start and duration of the scan. This is required for
3072 * restarting scanning during the discovery phase.
3073 */
3074 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3075 hdev->discovery.result_filtering) {
3076 hdev->discovery.scan_start = jiffies;
3077 hdev->discovery.scan_duration = timeout;
3078 }
3079
3080 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3081 timeout);
3082 }
3083
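/* Queue whatever is needed to stop the current discovery: cancel an
 * ongoing inquiry, disable LE scanning and, for BR/EDR discovery, cancel a
 * pending remote name request. Returns true when at least one command was
 * queued.
 */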
3084 bool hci_req_stop_discovery(struct hci_request *req)
3085 {
3086 struct hci_dev *hdev = req->hdev;
3087 struct discovery_state *d = &hdev->discovery;
3088 struct hci_cp_remote_name_req_cancel cp;
3089 struct inquiry_entry *e;
3090 bool ret = false;
3091
3092 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3093
3094 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3095 if (test_bit(HCI_INQUIRY, &hdev->flags))
3096 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3097
3098 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3099 cancel_delayed_work(&hdev->le_scan_disable);
3100 hci_req_add_le_scan_disable(req, false);
3101 }
3102
3103 ret = true;
3104 } else {
3105 /* Passive scanning */
3106 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3107 hci_req_add_le_scan_disable(req, false);
3108 ret = true;
3109 }
3110 }
3111
3112 /* No further actions needed for LE-only discovery */
3113 if (d->type == DISCOV_TYPE_LE)
3114 return ret;
3115
3116 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3117 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3118 NAME_PENDING);
3119 if (!e)
3120 return ret;
3121
3122 bacpy(&cp.bdaddr, &e->data.bdaddr);
3123 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3124 &cp);
3125 ret = true;
3126 }
3127
3128 return ret;
3129 }
3130
3131 static int stop_discovery(struct hci_request *req, unsigned long opt)
3132 {
3133 hci_dev_lock(req->hdev);
3134 hci_req_stop_discovery(req);
3135 hci_dev_unlock(req->hdev);
3136
3137 return 0;
3138 }
3139
3140 static void discov_update(struct work_struct *work)
3141 {
3142 struct hci_dev *hdev = container_of(work, struct hci_dev,
3143 discov_update);
3144 u8 status = 0;
3145
3146 switch (hdev->discovery.state) {
3147 case DISCOVERY_STARTING:
3148 start_discovery(hdev, &status);
3149 mgmt_start_discovery_complete(hdev, status);
3150 if (status)
3151 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3152 else
3153 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3154 break;
3155 case DISCOVERY_STOPPING:
3156 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3157 mgmt_stop_discovery_complete(hdev, status);
3158 if (!status)
3159 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3160 break;
3161 case DISCOVERY_STOPPED:
3162 default:
3163 return;
3164 }
3165 }
3166
3167 static void discov_off(struct work_struct *work)
3168 {
3169 struct hci_dev *hdev = container_of(work, struct hci_dev,
3170 discov_off.work);
3171
3172 BT_DBG("%s", hdev->name);
3173
3174 hci_dev_lock(hdev);
3175
3176 /* When discoverable timeout triggers, then just make sure
3177 * the limited discoverable flag is cleared. Even in the case
3178 * of a timeout triggered from general discoverable, it is
3179 * safe to unconditionally clear the flag.
3180 */
3181 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3182 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3183 hdev->discov_timeout = 0;
3184
3185 hci_dev_unlock(hdev);
3186
3187 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3188 mgmt_new_settings(hdev);
3189 }
3190
3191 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3192 {
3193 struct hci_dev *hdev = req->hdev;
3194 u8 link_sec;
3195
3196 hci_dev_lock(hdev);
3197
3198 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3199 !lmp_host_ssp_capable(hdev)) {
3200 u8 mode = 0x01;
3201
3202 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3203
3204 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3205 u8 support = 0x01;
3206
3207 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3208 sizeof(support), &support);
3209 }
3210 }
3211
3212 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3213 lmp_bredr_capable(hdev)) {
3214 struct hci_cp_write_le_host_supported cp;
3215
3216 cp.le = 0x01;
3217 cp.simul = 0x00;
3218
3219 /* Check first if we already have the right
3220 * host state (host features set)
3221 */
3222 if (cp.le != lmp_host_le_capable(hdev) ||
3223 cp.simul != lmp_host_le_br_capable(hdev))
3224 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3225 sizeof(cp), &cp);
3226 }
3227
3228 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3229 /* Make sure the controller has a good default for
3230 * advertising data. This also applies to the case
3231 * where BR/EDR was toggled during the AUTO_OFF phase.
3232 */
3233 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3234 list_empty(&hdev->adv_instances)) {
3235 int err;
3236
3237 if (ext_adv_capable(hdev)) {
3238 err = __hci_req_setup_ext_adv_instance(req,
3239 0x00);
3240 if (!err)
3241 __hci_req_update_scan_rsp_data(req,
3242 0x00);
3243 } else {
3244 err = 0;
3245 __hci_req_update_adv_data(req, 0x00);
3246 __hci_req_update_scan_rsp_data(req, 0x00);
3247 }
3248
3249 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3250 if (!ext_adv_capable(hdev))
3251 __hci_req_enable_advertising(req);
3252 else if (!err)
3253 __hci_req_enable_ext_advertising(req,
3254 0x00);
3255 }
3256 } else if (!list_empty(&hdev->adv_instances)) {
3257 struct adv_info *adv_instance;
3258
3259 adv_instance = list_first_entry(&hdev->adv_instances,
3260 struct adv_info, list);
3261 __hci_req_schedule_adv_instance(req,
3262 adv_instance->instance,
3263 true);
3264 }
3265 }
3266
3267 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3268 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3269 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3270 sizeof(link_sec), &link_sec);
3271
3272 if (lmp_bredr_capable(hdev)) {
3273 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3274 __hci_req_write_fast_connectable(req, true);
3275 else
3276 __hci_req_write_fast_connectable(req, false);
3277 __hci_req_update_scan(req);
3278 __hci_req_update_class(req);
3279 __hci_req_update_name(req);
3280 __hci_req_update_eir(req);
3281 }
3282
3283 hci_dev_unlock(hdev);
3284 return 0;
3285 }
3286
3287 int __hci_req_hci_power_on(struct hci_dev *hdev)
3288 {
3289 /* Register the available SMP channels (BR/EDR and LE) only when
3290 * successfully powering on the controller. This late
3291 * registration is required so that LE SMP can clearly decide if
3292 * the public address or static address is used.
3293 */
3294 smp_register(hdev);
3295
3296 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3297 NULL);
3298 }
3299
3300 void hci_request_setup(struct hci_dev *hdev)
3301 {
3302 INIT_WORK(&hdev->discov_update, discov_update);
3303 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3304 INIT_WORK(&hdev->scan_update, scan_update_work);
3305 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3306 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3307 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3308 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3309 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3310 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3311 }
3312
3313 void hci_request_cancel_all(struct hci_dev *hdev)
3314 {
3315 hci_req_sync_cancel(hdev, ENODEV);
3316
3317 cancel_work_sync(&hdev->discov_update);
3318 cancel_work_sync(&hdev->bg_scan_update);
3319 cancel_work_sync(&hdev->scan_update);
3320 cancel_work_sync(&hdev->connectable_update);
3321 cancel_work_sync(&hdev->discoverable_update);
3322 cancel_delayed_work_sync(&hdev->discov_off);
3323 cancel_delayed_work_sync(&hdev->le_scan_disable);
3324 cancel_delayed_work_sync(&hdev->le_scan_restart);
3325
3326 if (hdev->adv_instance_timeout) {
3327 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3328 hdev->adv_instance_timeout = 0;
3329 }
3330 }
3331