1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
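/* States of a synchronous request, stored in hdev->req_status (see
 * hci_req_sync_complete() and hci_req_sync_cancel() below).
 */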
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
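
/* Illustrative use of the request API above (a minimal sketch, not code
 * from this file; my_complete_cb is a hypothetical callback):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * The callback is attached to the last queued command and runs once the
 * controller has completed the whole request.
 */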
101
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114 }
115
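/* Abort a pending synchronous request: record @err as the result and
 * wake up the waiter in __hci_req_sync() or __hci_cmd_sync_ev().
 */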
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125 }
126
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129 {
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186 {
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
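
/* Illustrative use of __hci_cmd_sync() (a minimal sketch, not code from
 * this file); callers typically hold the request lock, must check the
 * return value with IS_ERR() and free the returned skb when done:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data as the command's return parameters ...
 *	kfree_skb(skb);
 */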
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266 }
267
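/* Like __hci_req_sync(), but takes the request lock and bails out with
 * -ENETDOWN when the controller is not up.
 */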
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 int ret;
273
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* Check the state after obtaining the lock to protect the HCI_UP
277 * against any races from hci_dev_do_close when the controller
278 * gets removed.
279 */
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 else
283 ret = -ENETDOWN;
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287 }
288
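/* Allocate an skb holding a single HCI command header plus @plen bytes
 * of parameters. Returns NULL if allocation fails.
 */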
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291 {
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 skb_put_data(skb, param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313 }
314
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318 {
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341 bt_cb(skb)->hci.req_event = event;
342
343 skb_queue_tail(&req->cmd_q, skb);
344 }
345
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348 {
349 hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = hdev->def_page_scan_type;
371 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
372 }
373
374 acp.window = cpu_to_le16(hdev->def_page_scan_window);
375
376 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
377 __cpu_to_le16(hdev->page_scan_window) != acp.window)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
379 sizeof(acp), &acp);
380
381 if (hdev->page_scan_type != type)
382 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
383 }
384
385 static void start_interleave_scan(struct hci_dev *hdev)
386 {
387 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
388 queue_delayed_work(hdev->req_workqueue,
389 &hdev->interleave_scan, 0);
390 }
391
392 static bool is_interleave_scanning(struct hci_dev *hdev)
393 {
394 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
395 }
396
397 static void cancel_interleave_scan(struct hci_dev *hdev)
398 {
399 bt_dev_dbg(hdev, "cancelling interleave scan");
400
401 cancel_delayed_work_sync(&hdev->interleave_scan);
402
403 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
404 }
405
406 /* Return true if this function started the interleave scan, false
407 * otherwise.
408 */
409 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
410 {
411 /* If there is at least one ADV monitor and at least one pending LE
412 * connection or device to be scanned for, we should alternate between
413 * allowlist scan and one without any filters to save power.
414 */
415 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
416 !(list_empty(&hdev->pend_le_conns) &&
417 list_empty(&hdev->pend_le_reports));
418 bool is_interleaving = is_interleave_scanning(hdev);
419
420 if (use_interleaving && !is_interleaving) {
421 start_interleave_scan(hdev);
422 bt_dev_dbg(hdev, "starting interleave scan");
423 return true;
424 }
425
426 if (!use_interleaving && is_interleaving)
427 cancel_interleave_scan(hdev);
428
429 return false;
430 }
431
432 /* This function controls the background scanning based on hdev->pend_le_conns
433 * list. If there are pending LE connections, we start the background scanning,
434 * otherwise we stop it.
435 *
436 * This function requires the caller holds hdev->lock.
437 */
438 static void __hci_update_background_scan(struct hci_request *req)
439 {
440 struct hci_dev *hdev = req->hdev;
441
442 if (!test_bit(HCI_UP, &hdev->flags) ||
443 test_bit(HCI_INIT, &hdev->flags) ||
444 hci_dev_test_flag(hdev, HCI_SETUP) ||
445 hci_dev_test_flag(hdev, HCI_CONFIG) ||
446 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
447 hci_dev_test_flag(hdev, HCI_UNREGISTER))
448 return;
449
450 /* No point in doing scanning if LE support hasn't been enabled */
451 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
452 return;
453
454 /* If discovery is active don't interfere with it */
455 if (hdev->discovery.state != DISCOVERY_STOPPED)
456 return;
457
458 /* Reset RSSI and UUID filters when starting background scanning
459 * since these filters are meant for service discovery only.
460 *
461 * The Start Discovery and Start Service Discovery operations
462 * ensure to set proper values for RSSI threshold and UUID
463 * filter list. So it is safe to just reset them here.
464 */
465 hci_discovery_filter_clear(hdev);
466
467 BT_DBG("%s ADV monitoring is %s", hdev->name,
468 hci_is_adv_monitoring(hdev) ? "on" : "off");
469
470 if (list_empty(&hdev->pend_le_conns) &&
471 list_empty(&hdev->pend_le_reports) &&
472 !hci_is_adv_monitoring(hdev)) {
473 /* If there are no pending LE connections, no devices to
474 * be scanned for and no ADV monitors, we should stop the
475 * background scanning.
476 */
477
478 /* If controller is not scanning we are done. */
479 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
480 return;
481
482 hci_req_add_le_scan_disable(req, false);
483
484 BT_DBG("%s stopping background scanning", hdev->name);
485 } else {
486 /* If there is at least one pending LE connection, we should
487 * keep the background scan running.
488 */
489
490 /* If controller is connecting, we should not start scanning
491 * since some controllers are not able to scan and connect at
492 * the same time.
493 */
494 if (hci_lookup_le_connect(hdev))
495 return;
496
497 /* If controller is currently scanning, we stop it to ensure we
498 * don't miss any advertising (due to duplicates filter).
499 */
500 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
501 hci_req_add_le_scan_disable(req, false);
502
503 hci_req_add_le_passive_scan(req);
504 bt_dev_dbg(hdev, "starting background scanning");
505 }
506 }
507
508 void __hci_req_update_name(struct hci_request *req)
509 {
510 struct hci_dev *hdev = req->hdev;
511 struct hci_cp_write_local_name cp;
512
513 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
514
515 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
516 }
517
518 #define PNP_INFO_SVCLASS_ID 0x1200
519
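/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes). The field type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME when not all UUIDs fit. Returns a
 * pointer past the written data. The 32-bit and 128-bit variants below
 * follow the same pattern.
 */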
520 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
521 {
522 u8 *ptr = data, *uuids_start = NULL;
523 struct bt_uuid *uuid;
524
525 if (len < 4)
526 return ptr;
527
528 list_for_each_entry(uuid, &hdev->uuids, list) {
529 u16 uuid16;
530
531 if (uuid->size != 16)
532 continue;
533
534 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
535 if (uuid16 < 0x1100)
536 continue;
537
538 if (uuid16 == PNP_INFO_SVCLASS_ID)
539 continue;
540
541 if (!uuids_start) {
542 uuids_start = ptr;
543 uuids_start[0] = 1;
544 uuids_start[1] = EIR_UUID16_ALL;
545 ptr += 2;
546 }
547
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + sizeof(u16) > len) {
550 uuids_start[1] = EIR_UUID16_SOME;
551 break;
552 }
553
554 *ptr++ = (uuid16 & 0x00ff);
555 *ptr++ = (uuid16 & 0xff00) >> 8;
556 uuids_start[0] += sizeof(uuid16);
557 }
558
559 return ptr;
560 }
561
562 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
563 {
564 u8 *ptr = data, *uuids_start = NULL;
565 struct bt_uuid *uuid;
566
567 if (len < 6)
568 return ptr;
569
570 list_for_each_entry(uuid, &hdev->uuids, list) {
571 if (uuid->size != 32)
572 continue;
573
574 if (!uuids_start) {
575 uuids_start = ptr;
576 uuids_start[0] = 1;
577 uuids_start[1] = EIR_UUID32_ALL;
578 ptr += 2;
579 }
580
581 /* Stop if not enough space to put next UUID */
582 if ((ptr - data) + sizeof(u32) > len) {
583 uuids_start[1] = EIR_UUID32_SOME;
584 break;
585 }
586
587 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
588 ptr += sizeof(u32);
589 uuids_start[0] += sizeof(u32);
590 }
591
592 return ptr;
593 }
594
595 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
596 {
597 u8 *ptr = data, *uuids_start = NULL;
598 struct bt_uuid *uuid;
599
600 if (len < 18)
601 return ptr;
602
603 list_for_each_entry(uuid, &hdev->uuids, list) {
604 if (uuid->size != 128)
605 continue;
606
607 if (!uuids_start) {
608 uuids_start = ptr;
609 uuids_start[0] = 1;
610 uuids_start[1] = EIR_UUID128_ALL;
611 ptr += 2;
612 }
613
614 /* Stop if not enough space to put next UUID */
615 if ((ptr - data) + 16 > len) {
616 uuids_start[1] = EIR_UUID128_SOME;
617 break;
618 }
619
620 memcpy(ptr, uuid->uuid, 16);
621 ptr += 16;
622 uuids_start[0] += 16;
623 }
624
625 return ptr;
626 }
627
628 static void create_eir(struct hci_dev *hdev, u8 *data)
629 {
630 u8 *ptr = data;
631 size_t name_len;
632
633 name_len = strlen(hdev->dev_name);
634
635 if (name_len > 0) {
636 /* EIR Data type */
637 if (name_len > 48) {
638 name_len = 48;
639 ptr[1] = EIR_NAME_SHORT;
640 } else
641 ptr[1] = EIR_NAME_COMPLETE;
642
643 /* EIR Data length */
644 ptr[0] = name_len + 1;
645
646 memcpy(ptr + 2, hdev->dev_name, name_len);
647
648 ptr += (name_len + 2);
649 }
650
651 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
652 ptr[0] = 2;
653 ptr[1] = EIR_TX_POWER;
654 ptr[2] = (u8) hdev->inq_tx_power;
655
656 ptr += 3;
657 }
658
659 if (hdev->devid_source > 0) {
660 ptr[0] = 9;
661 ptr[1] = EIR_DEVICE_ID;
662
663 put_unaligned_le16(hdev->devid_source, ptr + 2);
664 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
665 put_unaligned_le16(hdev->devid_product, ptr + 6);
666 put_unaligned_le16(hdev->devid_version, ptr + 8);
667
668 ptr += 10;
669 }
670
671 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
672 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
673 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
674 }
675
676 void __hci_req_update_eir(struct hci_request *req)
677 {
678 struct hci_dev *hdev = req->hdev;
679 struct hci_cp_write_eir cp;
680
681 if (!hdev_is_powered(hdev))
682 return;
683
684 if (!lmp_ext_inq_capable(hdev))
685 return;
686
687 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
688 return;
689
690 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
691 return;
692
693 memset(&cp, 0, sizeof(cp));
694
695 create_eir(hdev, cp.data);
696
697 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
698 return;
699
700 memcpy(hdev->eir, cp.data, sizeof(cp.data));
701
702 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
703 }
704
705 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
706 {
707 struct hci_dev *hdev = req->hdev;
708
709 if (hdev->scanning_paused) {
710 bt_dev_dbg(hdev, "Scanning is paused for suspend");
711 return;
712 }
713
714 if (use_ext_scan(hdev)) {
715 struct hci_cp_le_set_ext_scan_enable cp;
716
717 memset(&cp, 0, sizeof(cp));
718 cp.enable = LE_SCAN_DISABLE;
719 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
720 &cp);
721 } else {
722 struct hci_cp_le_set_scan_enable cp;
723
724 memset(&cp, 0, sizeof(cp));
725 cp.enable = LE_SCAN_DISABLE;
726 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
727 }
728
729 /* Disable address resolution */
730 if (use_ll_privacy(hdev) &&
731 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
732 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
733 __u8 enable = 0x00;
734
735 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
736 }
737 }
738
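/* Queue commands removing @bdaddr from the controller accept list and,
 * when LL privacy is enabled and an IRK is known for the address, from
 * the resolving list as well.
 */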
739 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
740 u8 bdaddr_type)
741 {
742 struct hci_cp_le_del_from_accept_list cp;
743
744 cp.bdaddr_type = bdaddr_type;
745 bacpy(&cp.bdaddr, bdaddr);
746
747 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
748 cp.bdaddr_type);
749 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
750
751 if (use_ll_privacy(req->hdev) &&
752 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
753 struct smp_irk *irk;
754
755 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
756 if (irk) {
757 struct hci_cp_le_del_from_resolv_list cp;
758
759 cp.bdaddr_type = bdaddr_type;
760 bacpy(&cp.bdaddr, bdaddr);
761
762 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
763 sizeof(cp), &cp);
764 }
765 }
766 }
767
768 /* Adds connection to accept list if needed. On error, returns -1. */
769 static int add_to_accept_list(struct hci_request *req,
770 struct hci_conn_params *params, u8 *num_entries,
771 bool allow_rpa)
772 {
773 struct hci_cp_le_add_to_accept_list cp;
774 struct hci_dev *hdev = req->hdev;
775
776 /* Already in accept list */
777 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
778 params->addr_type))
779 return 0;
780
781 /* Select filter policy to accept all advertising */
782 if (*num_entries >= hdev->le_accept_list_size)
783 return -1;
784
785 /* Accept list can not be used with RPAs */
786 if (!allow_rpa &&
787 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
788 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
789 return -1;
790 }
791
792 /* During suspend, only wakeable devices can be in accept list */
793 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
794 params->current_flags))
795 return 0;
796
797 *num_entries += 1;
798 cp.bdaddr_type = params->addr_type;
799 bacpy(&cp.bdaddr, &params->addr);
800
801 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
802 cp.bdaddr_type);
803 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
804
805 if (use_ll_privacy(hdev) &&
806 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
807 struct smp_irk *irk;
808
809 irk = hci_find_irk_by_addr(hdev, &params->addr,
810 params->addr_type);
811 if (irk) {
812 struct hci_cp_le_add_to_resolv_list cp;
813
814 cp.bdaddr_type = params->addr_type;
815 bacpy(&cp.bdaddr, &params->addr);
816 memcpy(cp.peer_irk, irk->val, 16);
817
818 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
819 memcpy(cp.local_irk, hdev->irk, 16);
820 else
821 memset(cp.local_irk, 0, 16);
822
823 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
824 sizeof(cp), &cp);
825 }
826 }
827
828 return 0;
829 }
830
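/* Re-program the controller accept list from the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the
 * accept list can be used, 0x00 when scanning should accept all
 * advertisements instead.
 */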
831 static u8 update_accept_list(struct hci_request *req)
832 {
833 struct hci_dev *hdev = req->hdev;
834 struct hci_conn_params *params;
835 struct bdaddr_list *b;
836 u8 num_entries = 0;
837 bool pend_conn, pend_report;
838 /* We allow usage of accept list even with RPAs in suspend. In the worst
839 * case, we won't be able to wake from devices that use the privacy 1.2
840 * features. Additionally, once we support privacy 1.2 and IRK
841 * offloading, we can update this to also check for those conditions.
842 */
843 bool allow_rpa = hdev->suspended;
844
845 if (use_ll_privacy(hdev) &&
846 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
847 allow_rpa = true;
848
849 /* Go through the current accept list programmed into the
850 * controller one by one and check if that address is still
851 * in the list of pending connections or list of devices to
852 * report. If not present in either list, then queue the
853 * command to remove it from the controller.
854 */
855 list_for_each_entry(b, &hdev->le_accept_list, list) {
856 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
857 &b->bdaddr,
858 b->bdaddr_type);
859 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
860 &b->bdaddr,
861 b->bdaddr_type);
862
863 /* If the device is not likely to connect or report,
864 * remove it from the accept list.
865 */
866 if (!pend_conn && !pend_report) {
867 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
868 continue;
869 }
870
871 /* Accept list can not be used with RPAs */
872 if (!allow_rpa &&
873 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
874 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
875 return 0x00;
876 }
877
878 num_entries++;
879 }
880
881 /* Since all no longer valid accept list entries have been
882 * removed, walk through the list of pending connections
883 * and ensure that any new device gets programmed into
884 * the controller.
885 *
886 * If the list of devices is larger than the list of
887 * available accept list entries in the controller, then
888 * just abort and return the filter policy value to not use the
889 * accept list.
890 */
891 list_for_each_entry(params, &hdev->pend_le_conns, action) {
892 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
893 return 0x00;
894 }
895
896 /* After adding all new pending connections, walk through
897 * the list of pending reports and also add these to the
898 * accept list if there is still space. Abort if space runs out.
899 */
900 list_for_each_entry(params, &hdev->pend_le_reports, action) {
901 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
902 return 0x00;
903 }
904
905 /* Use the allowlist unless the following conditions are all true:
906 * - We are not currently suspending
907 * - There are 1 or more ADV monitors registered
908 * - Interleaved scanning is not currently using the allowlist
909 *
910 * Once controller offloading of advertisement monitors is in
911 * place, the above condition should also check for MSFT
912 * extension support.
913 */
914 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
915 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
916 return 0x00;
917
918 /* Select filter policy to use accept list */
919 return 0x01;
920 }
921
922 static bool scan_use_rpa(struct hci_dev *hdev)
923 {
924 return hci_dev_test_flag(hdev, HCI_PRIVACY);
925 }
926
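/* Queue the commands that configure and enable LE scanning with the
 * given parameters, using the extended scan commands when the
 * controller supports them.
 */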
927 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
928 u16 window, u8 own_addr_type, u8 filter_policy,
929 bool addr_resolv)
930 {
931 struct hci_dev *hdev = req->hdev;
932
933 if (hdev->scanning_paused) {
934 bt_dev_dbg(hdev, "Scanning is paused for suspend");
935 return;
936 }
937
938 if (use_ll_privacy(hdev) &&
939 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
940 addr_resolv) {
941 u8 enable = 0x01;
942
943 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
944 }
945
946 /* Use extended scanning if the Set Extended Scan Parameters and
947 * Set Extended Scan Enable commands are supported.
948 */
949 if (use_ext_scan(hdev)) {
950 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
951 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
952 struct hci_cp_le_scan_phy_params *phy_params;
953 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
954 u32 plen;
955
956 ext_param_cp = (void *)data;
957 phy_params = (void *)ext_param_cp->data;
958
959 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
960 ext_param_cp->own_addr_type = own_addr_type;
961 ext_param_cp->filter_policy = filter_policy;
962
963 plen = sizeof(*ext_param_cp);
964
965 if (scan_1m(hdev) || scan_2m(hdev)) {
966 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
967
968 memset(phy_params, 0, sizeof(*phy_params));
969 phy_params->type = type;
970 phy_params->interval = cpu_to_le16(interval);
971 phy_params->window = cpu_to_le16(window);
972
973 plen += sizeof(*phy_params);
974 phy_params++;
975 }
976
977 if (scan_coded(hdev)) {
978 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
979
980 memset(phy_params, 0, sizeof(*phy_params));
981 phy_params->type = type;
982 phy_params->interval = cpu_to_le16(interval);
983 phy_params->window = cpu_to_le16(window);
984
985 plen += sizeof(*phy_params);
986 phy_params++;
987 }
988
989 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
990 plen, ext_param_cp);
991
992 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
993 ext_enable_cp.enable = LE_SCAN_ENABLE;
994 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
995
996 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
997 sizeof(ext_enable_cp), &ext_enable_cp);
998 } else {
999 struct hci_cp_le_set_scan_param param_cp;
1000 struct hci_cp_le_set_scan_enable enable_cp;
1001
1002 memset(&param_cp, 0, sizeof(param_cp));
1003 param_cp.type = type;
1004 param_cp.interval = cpu_to_le16(interval);
1005 param_cp.window = cpu_to_le16(window);
1006 param_cp.own_address_type = own_addr_type;
1007 param_cp.filter_policy = filter_policy;
1008 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1009 &param_cp);
1010
1011 memset(&enable_cp, 0, sizeof(enable_cp));
1012 enable_cp.enable = LE_SCAN_ENABLE;
1013 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1014 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1015 &enable_cp);
1016 }
1017 }
1018
1019 /* Returns true if an LE connection is in the scanning state */
1020 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1021 {
1022 struct hci_conn_hash *h = &hdev->conn_hash;
1023 struct hci_conn *c;
1024
1025 rcu_read_lock();
1026
1027 list_for_each_entry_rcu(c, &h->list, list) {
1028 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1029 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1030 rcu_read_unlock();
1031 return true;
1032 }
1033 }
1034
1035 rcu_read_unlock();
1036
1037 return false;
1038 }
1039
1040 /* Call hci_req_add_le_scan_disable() first to disable controller
1041 * based address resolution so that the resolving list can be
1042 * reconfigured.
1043 */
1044 void hci_req_add_le_passive_scan(struct hci_request *req)
1045 {
1046 struct hci_dev *hdev = req->hdev;
1047 u8 own_addr_type;
1048 u8 filter_policy;
1049 u16 window, interval;
1050 /* Background scanning should run with address resolution */
1051 bool addr_resolv = true;
1052
1053 if (hdev->scanning_paused) {
1054 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1055 return;
1056 }
1057
1058 /* Set require_privacy to false since no SCAN_REQ are sent
1059 * during passive scanning. Not using a non-resolvable address
1060 * here is important so that peer devices using direct
1061 * advertising with our address will be correctly reported
1062 * by the controller.
1063 */
1064 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1065 &own_addr_type))
1066 return;
1067
1068 if (__hci_update_interleaved_scan(hdev))
1069 return;
1070
1071 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1072 /* Adding or removing entries from the accept list must
1073 * happen before enabling scanning. The controller does
1074 * not allow accept list modification while scanning.
1075 */
1076 filter_policy = update_accept_list(req);
1077
1078 /* When the controller is using random resolvable addresses and
1079 * LE privacy is enabled, then controllers that support Extended
1080 * Scanner Filter Policies can also handle directed
1081 * advertising.
1082 *
1083 * So instead of using filter policies 0x00 (no accept list)
1084 * and 0x01 (accept list enabled) use the new filter policies
1085 * 0x02 (no accept list) and 0x03 (accept list enabled).
1086 */
1087 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1088 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1089 filter_policy |= 0x02;
1090
1091 if (hdev->suspended) {
1092 window = hdev->le_scan_window_suspend;
1093 interval = hdev->le_scan_int_suspend;
1094 } else if (hci_is_le_conn_scanning(hdev)) {
1095 window = hdev->le_scan_window_connect;
1096 interval = hdev->le_scan_int_connect;
1097 } else if (hci_is_adv_monitoring(hdev)) {
1098 window = hdev->le_scan_window_adv_monitor;
1099 interval = hdev->le_scan_int_adv_monitor;
1100 } else {
1101 window = hdev->le_scan_window;
1102 interval = hdev->le_scan_interval;
1103 }
1104
1105 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1106 filter_policy);
1107 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1108 own_addr_type, filter_policy, addr_resolv);
1109 }
1110
1111 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1112 {
1113 struct adv_info *adv_instance;
1114
1115 /* Instance 0x00 always sets the local name */
1116 if (instance == 0x00)
1117 return 1;
1118
1119 adv_instance = hci_find_adv_instance(hdev, instance);
1120 if (!adv_instance)
1121 return 0;
1122
1123 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1124 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1125 return 1;
1126
1127 return adv_instance->scan_rsp_len;
1128 }
1129
1130 static void hci_req_clear_event_filter(struct hci_request *req)
1131 {
1132 struct hci_cp_set_event_filter f;
1133
1134 memset(&f, 0, sizeof(f));
1135 f.flt_type = HCI_FLT_CLEAR_ALL;
1136 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1137
1138 /* Update page scan state (since we may have modified it when setting
1139 * the event filter).
1140 */
1141 __hci_req_update_scan(req);
1142 }
1143
1144 static void hci_req_set_event_filter(struct hci_request *req)
1145 {
1146 struct bdaddr_list_with_flags *b;
1147 struct hci_cp_set_event_filter f;
1148 struct hci_dev *hdev = req->hdev;
1149 u8 scan = SCAN_DISABLED;
1150
1151 /* Always clear event filter when starting */
1152 hci_req_clear_event_filter(req);
1153
1154 list_for_each_entry(b, &hdev->accept_list, list) {
1155 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1156 b->current_flags))
1157 continue;
1158
1159 memset(&f, 0, sizeof(f));
1160 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1161 f.flt_type = HCI_FLT_CONN_SETUP;
1162 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1163 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1164
1165 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1166 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1167 scan = SCAN_PAGE;
1168 }
1169
1170 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1171 }
1172
1173 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1174 {
1175 /* Before changing params disable scan if enabled */
1176 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1177 hci_req_add_le_scan_disable(req, false);
1178
1179 /* Configure params and enable scanning */
1180 hci_req_add_le_passive_scan(req);
1181
1182 /* Block suspend notifier on response */
1183 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1184 }
1185
1186 static void cancel_adv_timeout(struct hci_dev *hdev)
1187 {
1188 if (hdev->adv_instance_timeout) {
1189 hdev->adv_instance_timeout = 0;
1190 cancel_delayed_work(&hdev->adv_instance_expire);
1191 }
1192 }
1193
1194 /* This function requires the caller holds hdev->lock */
1195 static void hci_suspend_adv_instances(struct hci_request *req)
1196 {
1197 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1198
1199 /* Call to disable any advertisements active on the controller.
1200 * This will succeed even if no advertisements are configured.
1201 */
1202 __hci_req_disable_advertising(req);
1203
1204 /* If we are using software rotation, pause the loop */
1205 if (!ext_adv_capable(req->hdev))
1206 cancel_adv_timeout(req->hdev);
1207 }
1208
1209 /* This function requires the caller holds hdev->lock */
1210 static void hci_resume_adv_instances(struct hci_request *req)
1211 {
1212 struct adv_info *adv;
1213
1214 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1215
1216 if (ext_adv_capable(req->hdev)) {
1217 /* Call for each tracked instance to be re-enabled */
1218 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1219 __hci_req_enable_ext_advertising(req,
1220 adv->instance);
1221 }
1222
1223 } else {
1224 /* Schedule for most recent instance to be restarted and begin
1225 * the software rotation loop
1226 */
1227 __hci_req_schedule_adv_instance(req,
1228 req->hdev->cur_adv_instance,
1229 true);
1230 }
1231 }
1232
1233 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1234 {
1235 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1236 status);
1237 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1238 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1239 wake_up(&hdev->suspend_wait_q);
1240 }
1241 }
1242
1243 /* Call with hci_dev_lock */
1244 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1245 {
1246 int old_state;
1247 struct hci_conn *conn;
1248 struct hci_request req;
1249 u8 page_scan;
1250 int disconnect_counter;
1251
1252 if (next == hdev->suspend_state) {
1253 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1254 goto done;
1255 }
1256
1257 hdev->suspend_state = next;
1258 hci_req_init(&req, hdev);
1259
1260 if (next == BT_SUSPEND_DISCONNECT) {
1261 /* Mark device as suspended */
1262 hdev->suspended = true;
1263
1264 /* Pause discovery if not already stopped */
1265 old_state = hdev->discovery.state;
1266 if (old_state != DISCOVERY_STOPPED) {
1267 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1268 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1269 queue_work(hdev->req_workqueue, &hdev->discov_update);
1270 }
1271
1272 hdev->discovery_paused = true;
1273 hdev->discovery_old_state = old_state;
1274
1275 /* Stop directed advertising */
1276 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1277 if (old_state) {
1278 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1279 cancel_delayed_work(&hdev->discov_off);
1280 queue_delayed_work(hdev->req_workqueue,
1281 &hdev->discov_off, 0);
1282 }
1283
1284 /* Pause other advertisements */
1285 if (hdev->adv_instance_cnt)
1286 hci_suspend_adv_instances(&req);
1287
1288 hdev->advertising_paused = true;
1289 hdev->advertising_old_state = old_state;
1290 /* Disable page scan */
1291 page_scan = SCAN_DISABLED;
1292 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1293
1294 /* Disable LE passive scan if enabled */
1295 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1296 hci_req_add_le_scan_disable(&req, false);
1297
1298 /* Mark task needing completion */
1299 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1300
1301 /* Prevent disconnects from causing scanning to be re-enabled */
1302 hdev->scanning_paused = true;
1303
1304 /* Run commands before disconnecting */
1305 hci_req_run(&req, suspend_req_complete);
1306
1307 disconnect_counter = 0;
1308 /* Soft disconnect everything (power off) */
1309 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1310 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1311 disconnect_counter++;
1312 }
1313
1314 if (disconnect_counter > 0) {
1315 bt_dev_dbg(hdev,
1316 "Had %d disconnects. Will wait on them",
1317 disconnect_counter);
1318 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1319 }
1320 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1321 /* Unpause to take care of updating scanning params */
1322 hdev->scanning_paused = false;
1323 /* Enable event filter for paired devices */
1324 hci_req_set_event_filter(&req);
1325 /* Enable passive scan at lower duty cycle */
1326 hci_req_config_le_suspend_scan(&req);
1327 /* Pause scan changes again. */
1328 hdev->scanning_paused = true;
1329 hci_req_run(&req, suspend_req_complete);
1330 } else {
1331 hdev->suspended = false;
1332 hdev->scanning_paused = false;
1333
1334 hci_req_clear_event_filter(&req);
1335 /* Reset passive/background scanning to normal */
1336 hci_req_config_le_suspend_scan(&req);
1337
1338 /* Unpause directed advertising */
1339 hdev->advertising_paused = false;
1340 if (hdev->advertising_old_state) {
1341 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1342 hdev->suspend_tasks);
1343 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1344 queue_work(hdev->req_workqueue,
1345 &hdev->discoverable_update);
1346 hdev->advertising_old_state = 0;
1347 }
1348
1349 /* Resume other advertisements */
1350 if (hdev->adv_instance_cnt)
1351 hci_resume_adv_instances(&req);
1352
1353 /* Unpause discovery */
1354 hdev->discovery_paused = false;
1355 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1356 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1357 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1358 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1359 queue_work(hdev->req_workqueue, &hdev->discov_update);
1360 }
1361
1362 hci_req_run(&req, suspend_req_complete);
1363 }
1364
1365 hdev->suspend_state = next;
1366
1367 done:
1368 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1369 wake_up(&hdev->suspend_wait_q);
1370 }
1371
1372 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1373 {
1374 u8 instance = hdev->cur_adv_instance;
1375 struct adv_info *adv_instance;
1376
1377 /* Instance 0x00 always sets the local name */
1378 if (instance == 0x00)
1379 return 1;
1380
1381 adv_instance = hci_find_adv_instance(hdev, instance);
1382 if (!adv_instance)
1383 return 0;
1384
1385 /* TODO: Take into account the "appearance" and "local-name" flags here.
1386 * These are currently being ignored as they are not supported.
1387 */
1388 return adv_instance->scan_rsp_len;
1389 }
1390
1391 void __hci_req_disable_advertising(struct hci_request *req)
1392 {
1393 if (ext_adv_capable(req->hdev)) {
1394 __hci_req_disable_ext_adv_instance(req, 0x00);
1395
1396 } else {
1397 u8 enable = 0x00;
1398
1399 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1400 }
1401 }
1402
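/* Return the MGMT advertising flags for @instance. Instance 0x00 maps
 * the global connectable and discoverable settings to flags; unknown
 * instances yield 0.
 */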
1403 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1404 {
1405 u32 flags;
1406 struct adv_info *adv_instance;
1407
1408 if (instance == 0x00) {
1409 /* Instance 0 always manages the "Tx Power" and "Flags"
1410 * fields
1411 */
1412 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1413
1414 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1415 * corresponds to the "connectable" instance flag.
1416 */
1417 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1418 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1419
1420 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1421 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1422 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1423 flags |= MGMT_ADV_FLAG_DISCOV;
1424
1425 return flags;
1426 }
1427
1428 adv_instance = hci_find_adv_instance(hdev, instance);
1429
1430 /* Return 0 when we got an invalid instance identifier. */
1431 if (!adv_instance)
1432 return 0;
1433
1434 return adv_instance->flags;
1435 }
1436
1437 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1438 {
1439 /* If privacy is not enabled don't use RPA */
1440 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1441 return false;
1442
1443 /* If basic privacy mode is enabled use RPA */
1444 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1445 return true;
1446
1447 /* If limited privacy mode is enabled don't use RPA if we're
1448 * both discoverable and bondable.
1449 */
1450 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1451 hci_dev_test_flag(hdev, HCI_BONDABLE))
1452 return false;
1453
1454 /* We're neither bondable nor discoverable in the limited
1455 * privacy mode, therefore use RPA.
1456 */
1457 return true;
1458 }
1459
1460 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1461 {
1462 /* If there is no connection we are OK to advertise. */
1463 if (hci_conn_num(hdev, LE_LINK) == 0)
1464 return true;
1465
1466 /* Check le_states if there is any connection in slave role. */
1467 if (hdev->conn_hash.le_num_slave > 0) {
1468 /* Slave connection state and non connectable mode bit 20. */
1469 if (!connectable && !(hdev->le_states[2] & 0x10))
1470 return false;
1471
1472 /* Slave connection state and connectable mode bit 38
1473 * and scannable bit 21.
1474 */
1475 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1476 !(hdev->le_states[2] & 0x20)))
1477 return false;
1478 }
1479
1480 /* Check le_states if there is any connection in master role. */
1481 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1482 /* Master connection state and non connectable mode bit 18. */
1483 if (!connectable && !(hdev->le_states[2] & 0x02))
1484 return false;
1485
1486 /* Master connection state and connectable mode bit 35 and
1487 * scannable 19.
1488 */
1489 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1490 !(hdev->le_states[2] & 0x08)))
1491 return false;
1492 }
1493
1494 return true;
1495 }
1496
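/* Queue the commands that (re)enable legacy advertising, deriving the
 * advertising type, intervals and own address type from the current
 * instance flags and global settings.
 */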
1497 void __hci_req_enable_advertising(struct hci_request *req)
1498 {
1499 struct hci_dev *hdev = req->hdev;
1500 struct hci_cp_le_set_adv_param cp;
1501 u8 own_addr_type, enable = 0x01;
1502 bool connectable;
1503 u16 adv_min_interval, adv_max_interval;
1504 u32 flags;
1505
1506 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1507
1508 /* If the "connectable" instance flag was not set, then choose between
1509 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1510 */
1511 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1512 mgmt_get_connectable(hdev);
1513
1514 if (!is_advertising_allowed(hdev, connectable))
1515 return;
1516
1517 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1518 __hci_req_disable_advertising(req);
1519
1520 /* Clear the HCI_LE_ADV bit temporarily so that the
1521 * hci_update_random_address knows that it's safe to go ahead
1522 * and write a new random address. The flag will be set back on
1523 * as soon as the SET_ADV_ENABLE HCI command completes.
1524 */
1525 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1526
1527 /* Set require_privacy to true only when non-connectable
1528 * advertising is used. In that case it is fine to use a
1529 * non-resolvable private address.
1530 */
1531 if (hci_update_random_address(req, !connectable,
1532 adv_use_rpa(hdev, flags),
1533 &own_addr_type) < 0)
1534 return;
1535
1536 memset(&cp, 0, sizeof(cp));
1537
1538 if (connectable) {
1539 cp.type = LE_ADV_IND;
1540
1541 adv_min_interval = hdev->le_adv_min_interval;
1542 adv_max_interval = hdev->le_adv_max_interval;
1543 } else {
1544 if (get_cur_adv_instance_scan_rsp_len(hdev))
1545 cp.type = LE_ADV_SCAN_IND;
1546 else
1547 cp.type = LE_ADV_NONCONN_IND;
1548
1549 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1550 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1551 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1552 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1553 } else {
1554 adv_min_interval = hdev->le_adv_min_interval;
1555 adv_max_interval = hdev->le_adv_max_interval;
1556 }
1557 }
1558
1559 cp.min_interval = cpu_to_le16(adv_min_interval);
1560 cp.max_interval = cpu_to_le16(adv_max_interval);
1561 cp.own_address_type = own_addr_type;
1562 cp.channel_map = hdev->le_adv_channel_map;
1563
1564 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1565
1566 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1567 }
1568
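/* Append the device name to the advertising data at @ptr, preferring
 * the complete name, then the short name, then a truncated complete
 * name. Returns the updated data length.
 */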
1569 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1570 {
1571 size_t short_len;
1572 size_t complete_len;
1573
1574 /* no space left for name (+ NULL + type + len) */
1575 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1576 return ad_len;
1577
1578 /* use complete name if present and fits */
1579 complete_len = strlen(hdev->dev_name);
1580 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1581 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1582 hdev->dev_name, complete_len + 1);
1583
1584 /* use short name if present */
1585 short_len = strlen(hdev->short_name);
1586 if (short_len)
1587 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1588 hdev->short_name, short_len + 1);
1589
1590 /* use a shortened version of the full name if present; we already
1591 * know that the name is longer than HCI_MAX_SHORT_NAME_LENGTH
1592 */
1593 if (complete_len) {
1594 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1595
1596 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1597 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1598
1599 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1600 sizeof(name));
1601 }
1602
1603 return ad_len;
1604 }
1605
1606 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1607 {
1608 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1609 }
1610
1611 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1612 {
1613 u8 scan_rsp_len = 0;
1614
1615 if (hdev->appearance) {
1616 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1617 }
1618
1619 return append_local_name(hdev, ptr, scan_rsp_len);
1620 }
1621
1622 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1623 u8 *ptr)
1624 {
1625 struct adv_info *adv_instance;
1626 u32 instance_flags;
1627 u8 scan_rsp_len = 0;
1628
1629 adv_instance = hci_find_adv_instance(hdev, instance);
1630 if (!adv_instance)
1631 return 0;
1632
1633 instance_flags = adv_instance->flags;
1634
1635 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1636 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1637 }
1638
1639 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1640 adv_instance->scan_rsp_len);
1641
1642 scan_rsp_len += adv_instance->scan_rsp_len;
1643
1644 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1645 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1646
1647 return scan_rsp_len;
1648 }
1649
1650 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1651 {
1652 struct hci_dev *hdev = req->hdev;
1653 u8 len;
1654
1655 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1656 return;
1657
1658 if (ext_adv_capable(hdev)) {
1659 struct {
1660 struct hci_cp_le_set_ext_scan_rsp_data cp;
1661 u8 data[HCI_MAX_EXT_AD_LENGTH];
1662 } pdu;
1663
1664 memset(&pdu, 0, sizeof(pdu));
1665
1666 if (instance)
1667 len = create_instance_scan_rsp_data(hdev, instance,
1668 pdu.data);
1669 else
1670 len = create_default_scan_rsp_data(hdev, pdu.data);
1671
1672 if (hdev->scan_rsp_data_len == len &&
1673 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1674 return;
1675
1676 memcpy(hdev->scan_rsp_data, pdu.data, len);
1677 hdev->scan_rsp_data_len = len;
1678
1679 pdu.cp.handle = instance;
1680 pdu.cp.length = len;
1681 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1682 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1683
1684 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1685 sizeof(pdu.cp) + len, &pdu.cp);
1686 } else {
1687 struct hci_cp_le_set_scan_rsp_data cp;
1688
1689 memset(&cp, 0, sizeof(cp));
1690
1691 if (instance)
1692 len = create_instance_scan_rsp_data(hdev, instance,
1693 cp.data);
1694 else
1695 len = create_default_scan_rsp_data(hdev, cp.data);
1696
1697 if (hdev->scan_rsp_data_len == len &&
1698 !memcmp(cp.data, hdev->scan_rsp_data, len))
1699 return;
1700
1701 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1702 hdev->scan_rsp_data_len = len;
1703
1704 cp.length = len;
1705
1706 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1707 }
1708 }
1709
1710 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1711 {
1712 struct adv_info *adv_instance = NULL;
1713 u8 ad_len = 0, flags = 0;
1714 u32 instance_flags;
1715
1716 /* Return 0 when the current instance identifier is invalid. */
1717 if (instance) {
1718 adv_instance = hci_find_adv_instance(hdev, instance);
1719 if (!adv_instance)
1720 return 0;
1721 }
1722
1723 instance_flags = get_adv_instance_flags(hdev, instance);
1724
1725 /* If the instance already has the flags set, skip adding them
1726 * again.
1727 */
1728 if (adv_instance && eir_get_data(adv_instance->adv_data,
1729 adv_instance->adv_data_len, EIR_FLAGS,
1730 NULL))
1731 goto skip_flags;
1732
1733 /* The Add Advertising command allows userspace to set both the general
1734 * and limited discoverable flags.
1735 */
1736 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1737 flags |= LE_AD_GENERAL;
1738
1739 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1740 flags |= LE_AD_LIMITED;
1741
1742 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1743 flags |= LE_AD_NO_BREDR;
1744
1745 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1746 /* If a discovery flag wasn't provided, simply use the global
1747 * settings.
1748 */
1749 if (!flags)
1750 flags |= mgmt_get_adv_discov_flags(hdev);
1751
1752 /* If flags would still be empty, then there is no need to
1753 * include the "Flags" AD field".
1754 */
1755 if (flags) {
1756 ptr[0] = 0x02;
1757 ptr[1] = EIR_FLAGS;
1758 ptr[2] = flags;
1759
1760 ad_len += 3;
1761 ptr += 3;
1762 }
1763 }
1764
1765 skip_flags:
1766 if (adv_instance) {
1767 memcpy(ptr, adv_instance->adv_data,
1768 adv_instance->adv_data_len);
1769 ad_len += adv_instance->adv_data_len;
1770 ptr += adv_instance->adv_data_len;
1771 }
1772
1773 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1774 s8 adv_tx_power;
1775
1776 if (ext_adv_capable(hdev)) {
1777 if (adv_instance)
1778 adv_tx_power = adv_instance->tx_power;
1779 else
1780 adv_tx_power = hdev->adv_tx_power;
1781 } else {
1782 adv_tx_power = hdev->adv_tx_power;
1783 }
1784
1785 /* Provide Tx Power only if we can provide a valid value for it */
1786 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1787 ptr[0] = 0x02;
1788 ptr[1] = EIR_TX_POWER;
1789 ptr[2] = (u8)adv_tx_power;
1790
1791 ad_len += 3;
1792 ptr += 3;
1793 }
1794 }
1795
1796 return ad_len;
1797 }
1798
1799 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1800 {
1801 struct hci_dev *hdev = req->hdev;
1802 u8 len;
1803
1804 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1805 return;
1806
1807 if (ext_adv_capable(hdev)) {
1808 struct {
1809 struct hci_cp_le_set_ext_adv_data cp;
1810 u8 data[HCI_MAX_EXT_AD_LENGTH];
1811 } pdu;
1812
1813 memset(&pdu, 0, sizeof(pdu));
1814
1815 len = create_instance_adv_data(hdev, instance, pdu.data);
1816
1817 /* There's nothing to do if the data hasn't changed */
1818 if (hdev->adv_data_len == len &&
1819 memcmp(pdu.data, hdev->adv_data, len) == 0)
1820 return;
1821
1822 memcpy(hdev->adv_data, pdu.data, len);
1823 hdev->adv_data_len = len;
1824
1825 pdu.cp.length = len;
1826 pdu.cp.handle = instance;
1827 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1828 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1829
1830 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1831 sizeof(pdu.cp) + len, &pdu.cp);
1832 } else {
1833 struct hci_cp_le_set_adv_data cp;
1834
1835 memset(&cp, 0, sizeof(cp));
1836
1837 len = create_instance_adv_data(hdev, instance, cp.data);
1838
1839 /* There's nothing to do if the data hasn't changed */
1840 if (hdev->adv_data_len == len &&
1841 memcmp(cp.data, hdev->adv_data, len) == 0)
1842 return;
1843
1844 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1845 hdev->adv_data_len = len;
1846
1847 cp.length = len;
1848
1849 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1850 }
1851 }
1852
1853 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1854 {
1855 struct hci_request req;
1856
1857 hci_req_init(&req, hdev);
1858 __hci_req_update_adv_data(&req, instance);
1859
1860 return hci_req_run(&req, NULL);
1861 }
1862
1863 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1864 u16 opcode)
1865 {
1866 BT_DBG("%s status %u", hdev->name, status);
1867 }
1868
1869 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1870 {
1871 struct hci_request req;
1872 __u8 enable = 0x00;
1873
1874 if (!use_ll_privacy(hdev) &&
1875 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1876 return;
1877
1878 hci_req_init(&req, hdev);
1879
1880 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1881
1882 hci_req_run(&req, enable_addr_resolution_complete);
1883 }
1884
1885 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1886 {
1887 BT_DBG("%s status %u", hdev->name, status);
1888 }
1889
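/* Re-enable advertising after it was turned off, e.g. once a connection has
 * been established. If an advertising instance is currently selected it is
 * rescheduled with force=true, otherwise instance 0x00 (the data configured
 * via Set Advertising) is programmed and enabled again.
 */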
1890 void hci_req_reenable_advertising(struct hci_dev *hdev)
1891 {
1892 struct hci_request req;
1893
1894 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1895 list_empty(&hdev->adv_instances))
1896 return;
1897
1898 hci_req_init(&req, hdev);
1899
1900 if (hdev->cur_adv_instance) {
1901 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1902 true);
1903 } else {
1904 if (ext_adv_capable(hdev)) {
1905 __hci_req_start_ext_adv(&req, 0x00);
1906 } else {
1907 __hci_req_update_adv_data(&req, 0x00);
1908 __hci_req_update_scan_rsp_data(&req, 0x00);
1909 __hci_req_enable_advertising(&req);
1910 }
1911 }
1912
1913 hci_req_run(&req, adv_enable_complete);
1914 }
1915
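/* Delayed work that runs when the duration of the current advertising
 * instance has elapsed: the instance is cleared (or kept but deactivated if
 * it still has remaining lifetime) and, if no instances are left, advertising
 * is disabled altogether. Instance 0x00 never expires this way.
 */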
1916 static void adv_timeout_expire(struct work_struct *work)
1917 {
1918 struct hci_dev *hdev = container_of(work, struct hci_dev,
1919 adv_instance_expire.work);
1920
1921 struct hci_request req;
1922 u8 instance;
1923
1924 BT_DBG("%s", hdev->name);
1925
1926 hci_dev_lock(hdev);
1927
1928 hdev->adv_instance_timeout = 0;
1929
1930 instance = hdev->cur_adv_instance;
1931 if (instance == 0x00)
1932 goto unlock;
1933
1934 hci_req_init(&req, hdev);
1935
1936 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1937
1938 if (list_empty(&hdev->adv_instances))
1939 __hci_req_disable_advertising(&req);
1940
1941 hci_req_run(&req, NULL);
1942
1943 unlock:
1944 hci_dev_unlock(hdev);
1945 }
1946
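/* One step of interleaved scanning: restart passive scanning with the
 * current filter policy and flip the state between the allowlist-only and
 * no-filter phases. The durations of the two phases are handled by
 * interleave_scan_work() below.
 */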
1947 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1948 unsigned long opt)
1949 {
1950 struct hci_dev *hdev = req->hdev;
1951 int ret = 0;
1952
1953 hci_dev_lock(hdev);
1954
1955 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1956 hci_req_add_le_scan_disable(req, false);
1957 hci_req_add_le_passive_scan(req);
1958
1959 switch (hdev->interleave_scan_state) {
1960 case INTERLEAVE_SCAN_ALLOWLIST:
1961 bt_dev_dbg(hdev, "next state: allowlist");
1962 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1963 break;
1964 case INTERLEAVE_SCAN_NO_FILTER:
1965 bt_dev_dbg(hdev, "next state: no filter");
1966 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1967 break;
1968 case INTERLEAVE_SCAN_NONE:
1969 BT_ERR("unexpected error");
1970 ret = -1;
1971 }
1972
1973 hci_dev_unlock(hdev);
1974
1975 return ret;
1976 }
1977
1978 static void interleave_scan_work(struct work_struct *work)
1979 {
1980 struct hci_dev *hdev = container_of(work, struct hci_dev,
1981 interleave_scan.work);
1982 u8 status;
1983 unsigned long timeout;
1984
1985 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1986 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1987 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1988 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1989 } else {
1990 bt_dev_err(hdev, "unexpected error");
1991 return;
1992 }
1993
1994 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1995 HCI_CMD_TIMEOUT, &status);
1996
1997 /* Don't continue interleaving if it was canceled */
1998 if (is_interleave_scanning(hdev))
1999 queue_delayed_work(hdev->req_workqueue,
2000 &hdev->interleave_scan, timeout);
2001 }
2002
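/* Pick the own address type and, if needed, a random address for an
 * advertising instance:
 *   - with use_rpa: use (and if necessary regenerate) the resolvable
 *     private address and schedule its expiry,
 *   - with require_privacy but no RPA: generate a fresh non-resolvable
 *     private address,
 *   - otherwise: advertise with the public address.
 * The chosen random address is returned in rand_addr (BDADDR_ANY when the
 * public address is used).
 */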
2003 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2004 bool use_rpa, struct adv_info *adv_instance,
2005 u8 *own_addr_type, bdaddr_t *rand_addr)
2006 {
2007 int err;
2008
2009 bacpy(rand_addr, BDADDR_ANY);
2010
2011 /* If privacy is enabled use a resolvable private address. If
2012 * current RPA has expired then generate a new one.
2013 */
2014 if (use_rpa) {
2015 int to;
2016
2017 		/* If the controller supports LL Privacy, use own address type
2018 		 * 0x03 (resolvable private address, resolved by the controller).
2019 		 */
2020 if (use_ll_privacy(hdev))
2021 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2022 else
2023 *own_addr_type = ADDR_LE_DEV_RANDOM;
2024
2025 if (adv_instance) {
2026 if (!adv_instance->rpa_expired &&
2027 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2028 return 0;
2029
2030 adv_instance->rpa_expired = false;
2031 } else {
2032 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2033 !bacmp(&hdev->random_addr, &hdev->rpa))
2034 return 0;
2035 }
2036
2037 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2038 if (err < 0) {
2039 bt_dev_err(hdev, "failed to generate new RPA");
2040 return err;
2041 }
2042
2043 bacpy(rand_addr, &hdev->rpa);
2044
2045 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2046 if (adv_instance)
2047 queue_delayed_work(hdev->workqueue,
2048 &adv_instance->rpa_expired_cb, to);
2049 else
2050 queue_delayed_work(hdev->workqueue,
2051 &hdev->rpa_expired, to);
2052
2053 return 0;
2054 }
2055
2056 /* In case of required privacy without resolvable private address,
2057 	 * use a non-resolvable private address. This is useful for
2058 * non-connectable advertising.
2059 */
2060 if (require_privacy) {
2061 bdaddr_t nrpa;
2062
2063 while (true) {
2064 /* The non-resolvable private address is generated
2065 			 * from six random bytes with the two most significant
2066 * bits cleared.
2067 */
2068 get_random_bytes(&nrpa, 6);
2069 nrpa.b[5] &= 0x3f;
2070
2071 /* The non-resolvable private address shall not be
2072 * equal to the public address.
2073 */
2074 if (bacmp(&hdev->bdaddr, &nrpa))
2075 break;
2076 }
2077
2078 *own_addr_type = ADDR_LE_DEV_RANDOM;
2079 bacpy(rand_addr, &nrpa);
2080
2081 return 0;
2082 }
2083
2084 /* No privacy so use a public address. */
2085 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2086
2087 return 0;
2088 }
2089
2090 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2091 {
2092 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2093 }
2094
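/* Queue the LE Set Extended Advertising Parameters command for @instance,
 * deriving the event properties (connectable, scannable or non-connectable,
 * legacy vs. extended PDUs) and the PHYs from the instance flags. When a
 * random own address is used and differs from the one currently programmed,
 * an LE Set Advertising Set Random Address command is queued as well.
 */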
2095 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2096 {
2097 struct hci_cp_le_set_ext_adv_params cp;
2098 struct hci_dev *hdev = req->hdev;
2099 bool connectable;
2100 u32 flags;
2101 bdaddr_t random_addr;
2102 u8 own_addr_type;
2103 int err;
2104 struct adv_info *adv_instance;
2105 bool secondary_adv;
2106
2107 if (instance > 0) {
2108 adv_instance = hci_find_adv_instance(hdev, instance);
2109 if (!adv_instance)
2110 return -EINVAL;
2111 } else {
2112 adv_instance = NULL;
2113 }
2114
2115 flags = get_adv_instance_flags(hdev, instance);
2116
2117 /* If the "connectable" instance flag was not set, then choose between
2118 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2119 */
2120 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2121 mgmt_get_connectable(hdev);
2122
2123 if (!is_advertising_allowed(hdev, connectable))
2124 return -EPERM;
2125
2126 /* Set require_privacy to true only when non-connectable
2127 * advertising is used. In that case it is fine to use a
2128 * non-resolvable private address.
2129 */
2130 err = hci_get_random_address(hdev, !connectable,
2131 adv_use_rpa(hdev, flags), adv_instance,
2132 &own_addr_type, &random_addr);
2133 if (err < 0)
2134 return err;
2135
2136 memset(&cp, 0, sizeof(cp));
2137
2138 	/* In the extended advertising parameters the interval is 3 octets */
2139 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2140 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2141
2142 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2143
2144 if (connectable) {
2145 if (secondary_adv)
2146 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2147 else
2148 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2149 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2150 if (secondary_adv)
2151 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2152 else
2153 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2154 } else {
2155 if (secondary_adv)
2156 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2157 else
2158 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2159 }
2160
2161 cp.own_addr_type = own_addr_type;
2162 cp.channel_map = hdev->le_adv_channel_map;
2163 cp.tx_power = 127;
2164 cp.handle = instance;
2165
2166 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2167 cp.primary_phy = HCI_ADV_PHY_1M;
2168 cp.secondary_phy = HCI_ADV_PHY_2M;
2169 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2170 cp.primary_phy = HCI_ADV_PHY_CODED;
2171 cp.secondary_phy = HCI_ADV_PHY_CODED;
2172 } else {
2173 /* In all other cases use 1M */
2174 cp.primary_phy = HCI_ADV_PHY_1M;
2175 cp.secondary_phy = HCI_ADV_PHY_1M;
2176 }
2177
2178 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2179
2180 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2181 bacmp(&random_addr, BDADDR_ANY)) {
2182 struct hci_cp_le_set_adv_set_rand_addr cp;
2183
2184 		/* Check if the random address needs to be updated */
2185 if (adv_instance) {
2186 if (!bacmp(&random_addr, &adv_instance->random_addr))
2187 return 0;
2188 } else {
2189 if (!bacmp(&random_addr, &hdev->random_addr))
2190 return 0;
2191 }
2192
2193 memset(&cp, 0, sizeof(cp));
2194
2195 cp.handle = instance;
2196 bacpy(&cp.bdaddr, &random_addr);
2197
2198 hci_req_add(req,
2199 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2200 sizeof(cp), &cp);
2201 }
2202
2203 return 0;
2204 }
2205
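/* Queue LE Set Extended Advertising Enable for a single advertising set. If
 * the instance has a timeout, the duration is passed to the controller (in
 * units of 10 ms) so that it can stop the set on its own.
 */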
2206 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2207 {
2208 struct hci_dev *hdev = req->hdev;
2209 struct hci_cp_le_set_ext_adv_enable *cp;
2210 struct hci_cp_ext_adv_set *adv_set;
2211 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2212 struct adv_info *adv_instance;
2213
2214 if (instance > 0) {
2215 adv_instance = hci_find_adv_instance(hdev, instance);
2216 if (!adv_instance)
2217 return -EINVAL;
2218 } else {
2219 adv_instance = NULL;
2220 }
2221
2222 cp = (void *) data;
2223 adv_set = (void *) cp->data;
2224
2225 memset(cp, 0, sizeof(*cp));
2226
2227 cp->enable = 0x01;
2228 cp->num_of_sets = 0x01;
2229
2230 memset(adv_set, 0, sizeof(*adv_set));
2231
2232 adv_set->handle = instance;
2233
2234 /* Set duration per instance since controller is responsible for
2235 * scheduling it.
2236 */
2237 if (adv_instance && adv_instance->timeout) {
2238 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2239
2240 /* Time = N * 10 ms */
2241 adv_set->duration = cpu_to_le16(duration / 10);
2242 }
2243
2244 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2245 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2246 data);
2247
2248 return 0;
2249 }
2250
2251 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2252 {
2253 struct hci_dev *hdev = req->hdev;
2254 struct hci_cp_le_set_ext_adv_enable *cp;
2255 struct hci_cp_ext_adv_set *adv_set;
2256 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2257 u8 req_size;
2258
2259 /* If request specifies an instance that doesn't exist, fail */
2260 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2261 return -EINVAL;
2262
2263 memset(data, 0, sizeof(data));
2264
2265 cp = (void *)data;
2266 adv_set = (void *)cp->data;
2267
2268 /* Instance 0x00 indicates all advertising instances will be disabled */
2269 cp->num_of_sets = !!instance;
2270 cp->enable = 0x00;
2271
2272 adv_set->handle = instance;
2273
2274 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2275 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2276
2277 return 0;
2278 }
2279
2280 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2281 {
2282 struct hci_dev *hdev = req->hdev;
2283
2284 /* If request specifies an instance that doesn't exist, fail */
2285 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2286 return -EINVAL;
2287
2288 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2289
2290 return 0;
2291 }
2292
2293 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2294 {
2295 struct hci_dev *hdev = req->hdev;
2296 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2297 int err;
2298
2299 	/* If the instance isn't pending, the controller knows about it, and
2300 	 * it's safe to disable it.
2301 	 */
2302 if (adv_instance && !adv_instance->pending)
2303 __hci_req_disable_ext_adv_instance(req, instance);
2304
2305 err = __hci_req_setup_ext_adv_instance(req, instance);
2306 if (err < 0)
2307 return err;
2308
2309 __hci_req_update_scan_rsp_data(req, instance);
2310 __hci_req_enable_ext_advertising(req, instance);
2311
2312 return 0;
2313 }
2314
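/* (Re)start advertising with @instance. The instance timeout handling
 * differs between legacy and extended advertising: for legacy advertising
 * the expiry is driven by the adv_instance_expire delayed work, while for
 * extended advertising the controller enforces the duration itself.
 */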
2315 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2316 bool force)
2317 {
2318 struct hci_dev *hdev = req->hdev;
2319 struct adv_info *adv_instance = NULL;
2320 u16 timeout;
2321
2322 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2323 list_empty(&hdev->adv_instances))
2324 return -EPERM;
2325
2326 if (hdev->adv_instance_timeout)
2327 return -EBUSY;
2328
2329 adv_instance = hci_find_adv_instance(hdev, instance);
2330 if (!adv_instance)
2331 return -ENOENT;
2332
2333 /* A zero timeout means unlimited advertising. As long as there is
2334 * only one instance, duration should be ignored. We still set a timeout
2335 * in case further instances are being added later on.
2336 *
2337 * If the remaining lifetime of the instance is more than the duration
2338 * then the timeout corresponds to the duration, otherwise it will be
2339 * reduced to the remaining instance lifetime.
2340 */
2341 if (adv_instance->timeout == 0 ||
2342 adv_instance->duration <= adv_instance->remaining_time)
2343 timeout = adv_instance->duration;
2344 else
2345 timeout = adv_instance->remaining_time;
2346
2347 /* The remaining time is being reduced unless the instance is being
2348 * advertised without time limit.
2349 */
2350 if (adv_instance->timeout)
2351 adv_instance->remaining_time =
2352 adv_instance->remaining_time - timeout;
2353
2354 /* Only use work for scheduling instances with legacy advertising */
2355 if (!ext_adv_capable(hdev)) {
2356 hdev->adv_instance_timeout = timeout;
2357 queue_delayed_work(hdev->req_workqueue,
2358 &hdev->adv_instance_expire,
2359 msecs_to_jiffies(timeout * 1000));
2360 }
2361
2362 /* If we're just re-scheduling the same instance again then do not
2363 * execute any HCI commands. This happens when a single instance is
2364 * being advertised.
2365 */
2366 if (!force && hdev->cur_adv_instance == instance &&
2367 hci_dev_test_flag(hdev, HCI_LE_ADV))
2368 return 0;
2369
2370 hdev->cur_adv_instance = instance;
2371 if (ext_adv_capable(hdev)) {
2372 __hci_req_start_ext_adv(req, instance);
2373 } else {
2374 __hci_req_update_adv_data(req, instance);
2375 __hci_req_update_scan_rsp_data(req, instance);
2376 __hci_req_enable_advertising(req);
2377 }
2378
2379 return 0;
2380 }
2381
2382 /* For a single instance:
2383 * - force == true: The instance will be removed even when its remaining
2384 * lifetime is not zero.
2385 * - force == false: the instance will be deactivated but kept stored unless
2386 * the remaining lifetime is zero.
2387 *
2388 * For instance == 0x00:
2389 * - force == true: All instances will be removed regardless of their timeout
2390 * setting.
2391 * - force == false: Only instances that have a timeout will be removed.
2392 */
2393 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2394 struct hci_request *req, u8 instance,
2395 bool force)
2396 {
2397 struct adv_info *adv_instance, *n, *next_instance = NULL;
2398 int err;
2399 u8 rem_inst;
2400
2401 /* Cancel any timeout concerning the removed instance(s). */
2402 if (!instance || hdev->cur_adv_instance == instance)
2403 cancel_adv_timeout(hdev);
2404
2405 /* Get the next instance to advertise BEFORE we remove
2406 * the current one. This can be the same instance again
2407 * if there is only one instance.
2408 */
2409 if (instance && hdev->cur_adv_instance == instance)
2410 next_instance = hci_get_next_instance(hdev, instance);
2411
2412 if (instance == 0x00) {
2413 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2414 list) {
2415 if (!(force || adv_instance->timeout))
2416 continue;
2417
2418 rem_inst = adv_instance->instance;
2419 err = hci_remove_adv_instance(hdev, rem_inst);
2420 if (!err)
2421 mgmt_advertising_removed(sk, hdev, rem_inst);
2422 }
2423 } else {
2424 adv_instance = hci_find_adv_instance(hdev, instance);
2425
2426 if (force || (adv_instance && adv_instance->timeout &&
2427 !adv_instance->remaining_time)) {
2428 /* Don't advertise a removed instance. */
2429 if (next_instance &&
2430 next_instance->instance == instance)
2431 next_instance = NULL;
2432
2433 err = hci_remove_adv_instance(hdev, instance);
2434 if (!err)
2435 mgmt_advertising_removed(sk, hdev, instance);
2436 }
2437 }
2438
2439 if (!req || !hdev_is_powered(hdev) ||
2440 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2441 return;
2442
2443 if (next_instance && !ext_adv_capable(hdev))
2444 __hci_req_schedule_adv_instance(req, next_instance->instance,
2445 false);
2446 }
2447
2448 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2449 {
2450 struct hci_dev *hdev = req->hdev;
2451
2452 /* If we're advertising or initiating an LE connection we can't
2453 * go ahead and change the random address at this time. This is
2454 * because the eventual initiator address used for the
2455 * subsequently created connection will be undefined (some
2456 * controllers use the new address and others the one we had
2457 * when the operation started).
2458 *
2459 * In this kind of scenario skip the update and let the random
2460 * address be updated at the next cycle.
2461 */
2462 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2463 hci_lookup_le_connect(hdev)) {
2464 BT_DBG("Deferring random address update");
2465 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2466 return;
2467 }
2468
2469 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2470 }
2471
2472 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2473 bool use_rpa, u8 *own_addr_type)
2474 {
2475 struct hci_dev *hdev = req->hdev;
2476 int err;
2477
2478 /* If privacy is enabled use a resolvable private address. If
2479 * current RPA has expired or there is something else than
2480 * the current RPA in use, then generate a new one.
2481 */
2482 if (use_rpa) {
2483 int to;
2484
2485 		/* If the controller supports LL Privacy, use own address type
2486 		 * 0x03 (resolvable private address, resolved by the controller).
2487 		 */
2488 if (use_ll_privacy(hdev))
2489 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2490 else
2491 *own_addr_type = ADDR_LE_DEV_RANDOM;
2492
2493 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2494 !bacmp(&hdev->random_addr, &hdev->rpa))
2495 return 0;
2496
2497 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2498 if (err < 0) {
2499 bt_dev_err(hdev, "failed to generate new RPA");
2500 return err;
2501 }
2502
2503 set_random_addr(req, &hdev->rpa);
2504
2505 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2506 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2507
2508 return 0;
2509 }
2510
2511 /* In case of required privacy without resolvable private address,
2512 	 * use a non-resolvable private address. This is useful for active
2513 * scanning and non-connectable advertising.
2514 */
2515 if (require_privacy) {
2516 bdaddr_t nrpa;
2517
2518 while (true) {
2519 /* The non-resolvable private address is generated
2520 			 * from six random bytes with the two most significant
2521 * bits cleared.
2522 */
2523 get_random_bytes(&nrpa, 6);
2524 nrpa.b[5] &= 0x3f;
2525
2526 /* The non-resolvable private address shall not be
2527 * equal to the public address.
2528 */
2529 if (bacmp(&hdev->bdaddr, &nrpa))
2530 break;
2531 }
2532
2533 *own_addr_type = ADDR_LE_DEV_RANDOM;
2534 set_random_addr(req, &nrpa);
2535 return 0;
2536 }
2537
2538 	/* If forcing the static address is in use or there is no public
2539 	 * address, use the static address as the random address (but skip
2540 	 * the HCI command if the current random address is already the
2541 	 * static one).
2542 *
2543 * In case BR/EDR has been disabled on a dual-mode controller
2544 * and a static address has been configured, then use that
2545 * address instead of the public BR/EDR address.
2546 */
2547 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2548 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2549 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2550 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2551 *own_addr_type = ADDR_LE_DEV_RANDOM;
2552 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2553 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2554 &hdev->static_addr);
2555 return 0;
2556 }
2557
2558 /* Neither privacy nor static address is being used so use a
2559 * public address.
2560 */
2561 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2562
2563 return 0;
2564 }
2565
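/* Returns true if any device on the BR/EDR accept list does not currently
 * have an ACL link in BT_CONNECTED or BT_CONFIG state, in which case page
 * scan must stay enabled so that such a device can (re)connect.
 */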
2566 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2567 {
2568 struct bdaddr_list *b;
2569
2570 list_for_each_entry(b, &hdev->accept_list, list) {
2571 struct hci_conn *conn;
2572
2573 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2574 if (!conn)
2575 return true;
2576
2577 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2578 return true;
2579 }
2580
2581 return false;
2582 }
2583
2584 void __hci_req_update_scan(struct hci_request *req)
2585 {
2586 struct hci_dev *hdev = req->hdev;
2587 u8 scan;
2588
2589 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2590 return;
2591
2592 if (!hdev_is_powered(hdev))
2593 return;
2594
2595 if (mgmt_powering_down(hdev))
2596 return;
2597
2598 if (hdev->scanning_paused)
2599 return;
2600
2601 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2602 disconnected_accept_list_entries(hdev))
2603 scan = SCAN_PAGE;
2604 else
2605 scan = SCAN_DISABLED;
2606
2607 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2608 scan |= SCAN_INQUIRY;
2609
2610 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2611 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2612 return;
2613
2614 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2615 }
2616
2617 static int update_scan(struct hci_request *req, unsigned long opt)
2618 {
2619 hci_dev_lock(req->hdev);
2620 __hci_req_update_scan(req);
2621 hci_dev_unlock(req->hdev);
2622 return 0;
2623 }
2624
2625 static void scan_update_work(struct work_struct *work)
2626 {
2627 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2628
2629 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2630 }
2631
2632 static int connectable_update(struct hci_request *req, unsigned long opt)
2633 {
2634 struct hci_dev *hdev = req->hdev;
2635
2636 hci_dev_lock(hdev);
2637
2638 __hci_req_update_scan(req);
2639
2640 /* If BR/EDR is not enabled and we disable advertising as a
2641 * by-product of disabling connectable, we need to update the
2642 * advertising flags.
2643 */
2644 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2645 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2646
2647 /* Update the advertising parameters if necessary */
2648 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2649 !list_empty(&hdev->adv_instances)) {
2650 if (ext_adv_capable(hdev))
2651 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2652 else
2653 __hci_req_enable_advertising(req);
2654 }
2655
2656 __hci_update_background_scan(req);
2657
2658 hci_dev_unlock(hdev);
2659
2660 return 0;
2661 }
2662
2663 static void connectable_update_work(struct work_struct *work)
2664 {
2665 struct hci_dev *hdev = container_of(work, struct hci_dev,
2666 connectable_update);
2667 u8 status;
2668
2669 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2670 mgmt_set_connectable_complete(hdev, status);
2671 }
2672
2673 static u8 get_service_classes(struct hci_dev *hdev)
2674 {
2675 struct bt_uuid *uuid;
2676 u8 val = 0;
2677
2678 list_for_each_entry(uuid, &hdev->uuids, list)
2679 val |= uuid->svc_hint;
2680
2681 return val;
2682 }
2683
2684 void __hci_req_update_class(struct hci_request *req)
2685 {
2686 struct hci_dev *hdev = req->hdev;
2687 u8 cod[3];
2688
2689 BT_DBG("%s", hdev->name);
2690
2691 if (!hdev_is_powered(hdev))
2692 return;
2693
2694 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2695 return;
2696
2697 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2698 return;
2699
2700 cod[0] = hdev->minor_class;
2701 cod[1] = hdev->major_class;
2702 cod[2] = get_service_classes(hdev);
2703
2704 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2705 cod[1] |= 0x20;
2706
2707 if (memcmp(cod, hdev->dev_class, 3) == 0)
2708 return;
2709
2710 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2711 }
2712
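/* Queue Write Current IAC LAP with the inquiry access codes that match the
 * discoverable mode: LIAC (0x9e8b00) plus GIAC (0x9e8b33) for limited
 * discoverable, GIAC only for general discoverable.
 */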
2713 static void write_iac(struct hci_request *req)
2714 {
2715 struct hci_dev *hdev = req->hdev;
2716 struct hci_cp_write_current_iac_lap cp;
2717
2718 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2719 return;
2720
2721 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2722 /* Limited discoverable mode */
2723 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2724 cp.iac_lap[0] = 0x00; /* LIAC */
2725 cp.iac_lap[1] = 0x8b;
2726 cp.iac_lap[2] = 0x9e;
2727 cp.iac_lap[3] = 0x33; /* GIAC */
2728 cp.iac_lap[4] = 0x8b;
2729 cp.iac_lap[5] = 0x9e;
2730 } else {
2731 /* General discoverable mode */
2732 cp.num_iac = 1;
2733 cp.iac_lap[0] = 0x33; /* GIAC */
2734 cp.iac_lap[1] = 0x8b;
2735 cp.iac_lap[2] = 0x9e;
2736 }
2737
2738 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2739 (cp.num_iac * 3) + 1, &cp);
2740 }
2741
2742 static int discoverable_update(struct hci_request *req, unsigned long opt)
2743 {
2744 struct hci_dev *hdev = req->hdev;
2745
2746 hci_dev_lock(hdev);
2747
2748 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2749 write_iac(req);
2750 __hci_req_update_scan(req);
2751 __hci_req_update_class(req);
2752 }
2753
2754 /* Advertising instances don't use the global discoverable setting, so
2755 * only update AD if advertising was enabled using Set Advertising.
2756 */
2757 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2758 __hci_req_update_adv_data(req, 0x00);
2759
2760 /* Discoverable mode affects the local advertising
2761 * address in limited privacy mode.
2762 */
2763 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2764 if (ext_adv_capable(hdev))
2765 __hci_req_start_ext_adv(req, 0x00);
2766 else
2767 __hci_req_enable_advertising(req);
2768 }
2769 }
2770
2771 hci_dev_unlock(hdev);
2772
2773 return 0;
2774 }
2775
2776 static void discoverable_update_work(struct work_struct *work)
2777 {
2778 struct hci_dev *hdev = container_of(work, struct hci_dev,
2779 discoverable_update);
2780 u8 status;
2781
2782 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2783 mgmt_set_discoverable_complete(hdev, status);
2784 }
2785
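/* Queue the HCI command that tears down @conn based on its current state:
 * Disconnect (or Disconnect Physical Link for AMP) for established links,
 * Create Connection Cancel / LE Create Connection Cancel while connecting,
 * and a reject command for incoming connections awaiting a reply.
 */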
2786 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2787 u8 reason)
2788 {
2789 switch (conn->state) {
2790 case BT_CONNECTED:
2791 case BT_CONFIG:
2792 if (conn->type == AMP_LINK) {
2793 struct hci_cp_disconn_phy_link cp;
2794
2795 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2796 cp.reason = reason;
2797 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2798 &cp);
2799 } else {
2800 struct hci_cp_disconnect dc;
2801
2802 dc.handle = cpu_to_le16(conn->handle);
2803 dc.reason = reason;
2804 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2805 }
2806
2807 conn->state = BT_DISCONN;
2808
2809 break;
2810 case BT_CONNECT:
2811 if (conn->type == LE_LINK) {
2812 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2813 break;
2814 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2815 0, NULL);
2816 } else if (conn->type == ACL_LINK) {
2817 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2818 break;
2819 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2820 6, &conn->dst);
2821 }
2822 break;
2823 case BT_CONNECT2:
2824 if (conn->type == ACL_LINK) {
2825 struct hci_cp_reject_conn_req rej;
2826
2827 bacpy(&rej.bdaddr, &conn->dst);
2828 rej.reason = reason;
2829
2830 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2831 sizeof(rej), &rej);
2832 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2833 struct hci_cp_reject_sync_conn_req rej;
2834
2835 bacpy(&rej.bdaddr, &conn->dst);
2836
2837 /* SCO rejection has its own limited set of
2838 * allowed error values (0x0D-0x0F) which isn't
2839 * compatible with most values passed to this
2840 * function. To be safe hard-code one of the
2841 * values that's suitable for SCO.
2842 */
2843 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2844
2845 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2846 sizeof(rej), &rej);
2847 }
2848 break;
2849 default:
2850 conn->state = BT_CLOSED;
2851 break;
2852 }
2853 }
2854
2855 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2856 {
2857 if (status)
2858 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2859 }
2860
2861 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2862 {
2863 struct hci_request req;
2864 int err;
2865
2866 hci_req_init(&req, conn->hdev);
2867
2868 __hci_abort_conn(&req, conn, reason);
2869
2870 err = hci_req_run(&req, abort_conn_complete);
2871 if (err && err != -ENODATA) {
2872 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2873 return err;
2874 }
2875
2876 return 0;
2877 }
2878
2879 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2880 {
2881 hci_dev_lock(req->hdev);
2882 __hci_update_background_scan(req);
2883 hci_dev_unlock(req->hdev);
2884 return 0;
2885 }
2886
2887 static void bg_scan_update(struct work_struct *work)
2888 {
2889 struct hci_dev *hdev = container_of(work, struct hci_dev,
2890 bg_scan_update);
2891 struct hci_conn *conn;
2892 u8 status;
2893 int err;
2894
2895 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2896 if (!err)
2897 return;
2898
2899 hci_dev_lock(hdev);
2900
2901 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2902 if (conn)
2903 hci_le_conn_failed(conn, status);
2904
2905 hci_dev_unlock(hdev);
2906 }
2907
2908 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2909 {
2910 hci_req_add_le_scan_disable(req, false);
2911 return 0;
2912 }
2913
2914 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2915 {
2916 u8 length = opt;
2917 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2918 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2919 struct hci_cp_inquiry cp;
2920
2921 BT_DBG("%s", req->hdev->name);
2922
2923 hci_dev_lock(req->hdev);
2924 hci_inquiry_cache_flush(req->hdev);
2925 hci_dev_unlock(req->hdev);
2926
2927 memset(&cp, 0, sizeof(cp));
2928
2929 if (req->hdev->discovery.limited)
2930 memcpy(&cp.lap, liac, sizeof(cp.lap));
2931 else
2932 memcpy(&cp.lap, giac, sizeof(cp.lap));
2933
2934 cp.length = length;
2935
2936 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2937
2938 return 0;
2939 }
2940
2941 static void le_scan_disable_work(struct work_struct *work)
2942 {
2943 struct hci_dev *hdev = container_of(work, struct hci_dev,
2944 le_scan_disable.work);
2945 u8 status;
2946
2947 BT_DBG("%s", hdev->name);
2948
2949 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2950 return;
2951
2952 cancel_delayed_work(&hdev->le_scan_restart);
2953
2954 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2955 if (status) {
2956 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2957 status);
2958 return;
2959 }
2960
2961 hdev->discovery.scan_start = 0;
2962
2963 	/* If we were running an LE-only scan, change the discovery state. If
2964 	 * we were running both LE and BR/EDR inquiry simultaneously,
2965 	 * and BR/EDR inquiry is already finished, stop discovery,
2966 	 * otherwise BR/EDR inquiry will stop discovery when finished.
2967 	 * If we are going to resolve a remote device name, do not change
2968 	 * the discovery state.
2969 */
2970
2971 if (hdev->discovery.type == DISCOV_TYPE_LE)
2972 goto discov_stopped;
2973
2974 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2975 return;
2976
2977 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2978 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2979 hdev->discovery.state != DISCOVERY_RESOLVING)
2980 goto discov_stopped;
2981
2982 return;
2983 }
2984
2985 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2986 HCI_CMD_TIMEOUT, &status);
2987 if (status) {
2988 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2989 goto discov_stopped;
2990 }
2991
2992 return;
2993
2994 discov_stopped:
2995 hci_dev_lock(hdev);
2996 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2997 hci_dev_unlock(hdev);
2998 }
2999
3000 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3001 {
3002 struct hci_dev *hdev = req->hdev;
3003
3004 /* If controller is not scanning we are done. */
3005 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3006 return 0;
3007
3008 if (hdev->scanning_paused) {
3009 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3010 return 0;
3011 }
3012
3013 hci_req_add_le_scan_disable(req, false);
3014
3015 if (use_ext_scan(hdev)) {
3016 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3017
3018 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3019 ext_enable_cp.enable = LE_SCAN_ENABLE;
3020 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3021
3022 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3023 sizeof(ext_enable_cp), &ext_enable_cp);
3024 } else {
3025 struct hci_cp_le_set_scan_enable cp;
3026
3027 memset(&cp, 0, sizeof(cp));
3028 cp.enable = LE_SCAN_ENABLE;
3029 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3030 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3031 }
3032
3033 return 0;
3034 }
3035
3036 static void le_scan_restart_work(struct work_struct *work)
3037 {
3038 struct hci_dev *hdev = container_of(work, struct hci_dev,
3039 le_scan_restart.work);
3040 unsigned long timeout, duration, scan_start, now;
3041 u8 status;
3042
3043 BT_DBG("%s", hdev->name);
3044
3045 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3046 if (status) {
3047 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3048 status);
3049 return;
3050 }
3051
3052 hci_dev_lock(hdev);
3053
3054 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3055 !hdev->discovery.scan_start)
3056 goto unlock;
3057
3058 	/* When the scan was started, hdev->le_scan_disable was queued to run
3059 	 * 'duration' after scan_start. During the scan restart this job has
3060 	 * been canceled, so we need to queue it again with the proper
3061 	 * remaining timeout to make sure the scan does not run indefinitely.
3062 */
3063 duration = hdev->discovery.scan_duration;
3064 scan_start = hdev->discovery.scan_start;
3065 now = jiffies;
3066 if (now - scan_start <= duration) {
3067 int elapsed;
3068
3069 if (now >= scan_start)
3070 elapsed = now - scan_start;
3071 else
3072 elapsed = ULONG_MAX - scan_start + now;
3073
3074 timeout = duration - elapsed;
3075 } else {
3076 timeout = 0;
3077 }
3078
3079 queue_delayed_work(hdev->req_workqueue,
3080 &hdev->le_scan_disable, timeout);
3081
3082 unlock:
3083 hci_dev_unlock(hdev);
3084 }
3085
3086 static int active_scan(struct hci_request *req, unsigned long opt)
3087 {
3088 uint16_t interval = opt;
3089 struct hci_dev *hdev = req->hdev;
3090 u8 own_addr_type;
3091 /* Accept list is not used for discovery */
3092 u8 filter_policy = 0x00;
3093 /* Discovery doesn't require controller address resolution */
3094 bool addr_resolv = false;
3095 int err;
3096
3097 BT_DBG("%s", hdev->name);
3098
3099 /* If controller is scanning, it means the background scanning is
3100 * running. Thus, we should temporarily stop it in order to set the
3101 * discovery scanning parameters.
3102 */
3103 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3104 hci_req_add_le_scan_disable(req, false);
3105
3106 /* All active scans will be done with either a resolvable private
3107 * address (when privacy feature has been enabled) or non-resolvable
3108 * private address.
3109 */
3110 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3111 &own_addr_type);
3112 if (err < 0)
3113 own_addr_type = ADDR_LE_DEV_PUBLIC;
3114
3115 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3116 hdev->le_scan_window_discovery, own_addr_type,
3117 filter_policy, addr_resolv);
3118 return 0;
3119 }
3120
3121 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3122 {
3123 int err;
3124
3125 BT_DBG("%s", req->hdev->name);
3126
3127 err = active_scan(req, opt);
3128 if (err)
3129 return err;
3130
3131 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3132 }
3133
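/* Kick off discovery according to hdev->discovery.type: BR/EDR inquiry
 * only, LE scan only, or interleaved. For the LE based types a delayed
 * le_scan_disable work is queued so that scanning stops after the
 * discovery timeout.
 */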
3134 static void start_discovery(struct hci_dev *hdev, u8 *status)
3135 {
3136 unsigned long timeout;
3137
3138 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3139
3140 switch (hdev->discovery.type) {
3141 case DISCOV_TYPE_BREDR:
3142 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3143 hci_req_sync(hdev, bredr_inquiry,
3144 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3145 status);
3146 return;
3147 case DISCOV_TYPE_INTERLEAVED:
3148 /* When running simultaneous discovery, the LE scanning time
3149 		 * should occupy the whole discovery time since BR/EDR inquiry
3150 * and LE scanning are scheduled by the controller.
3151 *
3152 * For interleaving discovery in comparison, BR/EDR inquiry
3153 * and LE scanning are done sequentially with separate
3154 * timeouts.
3155 */
3156 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3157 &hdev->quirks)) {
3158 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3159 			/* During simultaneous discovery, we double the LE scan
3160 * interval. We must leave some time for the controller
3161 * to do BR/EDR inquiry.
3162 */
3163 hci_req_sync(hdev, interleaved_discov,
3164 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3165 status);
3166 break;
3167 }
3168
3169 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3170 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3171 HCI_CMD_TIMEOUT, status);
3172 break;
3173 case DISCOV_TYPE_LE:
3174 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3175 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3176 HCI_CMD_TIMEOUT, status);
3177 break;
3178 default:
3179 *status = HCI_ERROR_UNSPECIFIED;
3180 return;
3181 }
3182
3183 if (*status)
3184 return;
3185
3186 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3187
3188 /* When service discovery is used and the controller has a
3189 * strict duplicate filter, it is important to remember the
3190 * start and duration of the scan. This is required for
3191 * restarting scanning during the discovery phase.
3192 */
3193 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3194 hdev->discovery.result_filtering) {
3195 hdev->discovery.scan_start = jiffies;
3196 hdev->discovery.scan_duration = timeout;
3197 }
3198
3199 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3200 timeout);
3201 }
3202
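/* Queue the commands needed to stop an ongoing discovery: Inquiry Cancel
 * and/or LE scan disable while finding, plus Remote Name Request Cancel if
 * a name resolution is pending. Returns true if there was discovery
 * activity to stop.
 */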
3203 bool hci_req_stop_discovery(struct hci_request *req)
3204 {
3205 struct hci_dev *hdev = req->hdev;
3206 struct discovery_state *d = &hdev->discovery;
3207 struct hci_cp_remote_name_req_cancel cp;
3208 struct inquiry_entry *e;
3209 bool ret = false;
3210
3211 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3212
3213 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3214 if (test_bit(HCI_INQUIRY, &hdev->flags))
3215 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3216
3217 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3218 cancel_delayed_work(&hdev->le_scan_disable);
3219 hci_req_add_le_scan_disable(req, false);
3220 }
3221
3222 ret = true;
3223 } else {
3224 /* Passive scanning */
3225 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3226 hci_req_add_le_scan_disable(req, false);
3227 ret = true;
3228 }
3229 }
3230
3231 /* No further actions needed for LE-only discovery */
3232 if (d->type == DISCOV_TYPE_LE)
3233 return ret;
3234
3235 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3236 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3237 NAME_PENDING);
3238 if (!e)
3239 return ret;
3240
3241 bacpy(&cp.bdaddr, &e->data.bdaddr);
3242 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3243 &cp);
3244 ret = true;
3245 }
3246
3247 return ret;
3248 }
3249
3250 static int stop_discovery(struct hci_request *req, unsigned long opt)
3251 {
3252 hci_dev_lock(req->hdev);
3253 hci_req_stop_discovery(req);
3254 hci_dev_unlock(req->hdev);
3255
3256 return 0;
3257 }
3258
3259 static void discov_update(struct work_struct *work)
3260 {
3261 struct hci_dev *hdev = container_of(work, struct hci_dev,
3262 discov_update);
3263 u8 status = 0;
3264
3265 switch (hdev->discovery.state) {
3266 case DISCOVERY_STARTING:
3267 start_discovery(hdev, &status);
3268 mgmt_start_discovery_complete(hdev, status);
3269 if (status)
3270 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3271 else
3272 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3273 break;
3274 case DISCOVERY_STOPPING:
3275 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3276 mgmt_stop_discovery_complete(hdev, status);
3277 if (!status)
3278 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3279 break;
3280 case DISCOVERY_STOPPED:
3281 default:
3282 return;
3283 }
3284 }
3285
3286 static void discov_off(struct work_struct *work)
3287 {
3288 struct hci_dev *hdev = container_of(work, struct hci_dev,
3289 discov_off.work);
3290
3291 BT_DBG("%s", hdev->name);
3292
3293 hci_dev_lock(hdev);
3294
3295 /* When discoverable timeout triggers, then just make sure
3296 * the limited discoverable flag is cleared. Even in the case
3297 * of a timeout triggered from general discoverable, it is
3298 * safe to unconditionally clear the flag.
3299 */
3300 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3301 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3302 hdev->discov_timeout = 0;
3303
3304 hci_dev_unlock(hdev);
3305
3306 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3307 mgmt_new_settings(hdev);
3308 }
3309
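/* Bring the controller in line with the host configuration right after it
 * has been powered on: enable SSP/SC and LE host support where needed,
 * restore advertising data or instances, and refresh authentication, scan
 * mode, class of device, name and EIR for BR/EDR.
 */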
3310 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3311 {
3312 struct hci_dev *hdev = req->hdev;
3313 u8 link_sec;
3314
3315 hci_dev_lock(hdev);
3316
3317 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3318 !lmp_host_ssp_capable(hdev)) {
3319 u8 mode = 0x01;
3320
3321 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3322
3323 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3324 u8 support = 0x01;
3325
3326 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3327 sizeof(support), &support);
3328 }
3329 }
3330
3331 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3332 lmp_bredr_capable(hdev)) {
3333 struct hci_cp_write_le_host_supported cp;
3334
3335 cp.le = 0x01;
3336 cp.simul = 0x00;
3337
3338 /* Check first if we already have the right
3339 * host state (host features set)
3340 */
3341 if (cp.le != lmp_host_le_capable(hdev) ||
3342 cp.simul != lmp_host_le_br_capable(hdev))
3343 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3344 sizeof(cp), &cp);
3345 }
3346
3347 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3348 /* Make sure the controller has a good default for
3349 * advertising data. This also applies to the case
3350 * where BR/EDR was toggled during the AUTO_OFF phase.
3351 */
3352 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3353 list_empty(&hdev->adv_instances)) {
3354 int err;
3355
3356 if (ext_adv_capable(hdev)) {
3357 err = __hci_req_setup_ext_adv_instance(req,
3358 0x00);
3359 if (!err)
3360 __hci_req_update_scan_rsp_data(req,
3361 0x00);
3362 } else {
3363 err = 0;
3364 __hci_req_update_adv_data(req, 0x00);
3365 __hci_req_update_scan_rsp_data(req, 0x00);
3366 }
3367
3368 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3369 if (!ext_adv_capable(hdev))
3370 __hci_req_enable_advertising(req);
3371 else if (!err)
3372 __hci_req_enable_ext_advertising(req,
3373 0x00);
3374 }
3375 } else if (!list_empty(&hdev->adv_instances)) {
3376 struct adv_info *adv_instance;
3377
3378 adv_instance = list_first_entry(&hdev->adv_instances,
3379 struct adv_info, list);
3380 __hci_req_schedule_adv_instance(req,
3381 adv_instance->instance,
3382 true);
3383 }
3384 }
3385
3386 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3387 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3388 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3389 sizeof(link_sec), &link_sec);
3390
3391 if (lmp_bredr_capable(hdev)) {
3392 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3393 __hci_req_write_fast_connectable(req, true);
3394 else
3395 __hci_req_write_fast_connectable(req, false);
3396 __hci_req_update_scan(req);
3397 __hci_req_update_class(req);
3398 __hci_req_update_name(req);
3399 __hci_req_update_eir(req);
3400 }
3401
3402 hci_dev_unlock(hdev);
3403 return 0;
3404 }
3405
3406 int __hci_req_hci_power_on(struct hci_dev *hdev)
3407 {
3408 /* Register the available SMP channels (BR/EDR and LE) only when
3409 * successfully powering on the controller. This late
3410 * registration is required so that LE SMP can clearly decide if
3411 * the public address or static address is used.
3412 */
3413 smp_register(hdev);
3414
3415 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3416 NULL);
3417 }
3418
3419 void hci_request_setup(struct hci_dev *hdev)
3420 {
3421 INIT_WORK(&hdev->discov_update, discov_update);
3422 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3423 INIT_WORK(&hdev->scan_update, scan_update_work);
3424 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3425 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3426 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3427 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3428 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3429 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3430 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3431 }
3432
3433 void hci_request_cancel_all(struct hci_dev *hdev)
3434 {
3435 hci_req_sync_cancel(hdev, ENODEV);
3436
3437 cancel_work_sync(&hdev->discov_update);
3438 cancel_work_sync(&hdev->bg_scan_update);
3439 cancel_work_sync(&hdev->scan_update);
3440 cancel_work_sync(&hdev->connectable_update);
3441 cancel_work_sync(&hdev->discoverable_update);
3442 cancel_delayed_work_sync(&hdev->discov_off);
3443 cancel_delayed_work_sync(&hdev->le_scan_disable);
3444 cancel_delayed_work_sync(&hdev->le_scan_restart);
3445
3446 if (hdev->adv_instance_timeout) {
3447 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3448 hdev->adv_instance_timeout = 0;
3449 }
3450
3451 cancel_interleave_scan(hdev);
3452 }
3453