/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

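/* Initialize a request tracking structure so that commands can be
 * queued against the given controller before being submitted as one
 * batch.
 */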
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

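/* Splice all queued commands onto the controller's command queue and
 * attach the completion callback to the last command. Exactly one of
 * @complete and @complete_skb is expected to be set by the wrappers
 * below.
 */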
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

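/* Completion handler used by the synchronous request helpers: record
 * the result (and optionally the response skb) and wake up the waiter.
 */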
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

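/* Send a single HCI command and block until its completion is signalled
 * (for a non-zero @event, presumably when that event arrives), a signal
 * is received, or @timeout expires. Returns the response skb on success
 * and an ERR_PTR() value otherwise.
 */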
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

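/* Allocate and fill an skb carrying one HCI command packet: a command
 * header with opcode and parameter length, followed by the parameters.
 */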
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

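/* Queue page scan activity/type commands to switch between a fast
 * (interlaced, 160 msec interval) and the default (standard, 1.28 sec)
 * page scan setting, skipping commands whose values are already in
 * effect on the controller.
 */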
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

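/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, switching the field type to EIR_UUID16_SOME when space runs
 * out. Returns a pointer just past the written data. The 32-bit and
 * 128-bit variants below follow the same pattern.
 */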
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

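/* Build the extended inquiry response data: local name, TX power,
 * device ID and the 16/32/128-bit service UUID lists, in that order.
 */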
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

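/* Synchronize the controller white list with the pending connection
 * and report lists, and return the scan filter policy to use: 0x01
 * when the white list can be used, 0x00 (accept all advertising) when
 * it cannot, i.e. when RPAs are in use or the controller has too few
 * white list entries.
 */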
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Now that all stale white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses,
	 * i.e. LE privacy is enabled, controllers that support the
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

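/* Queue the commands for (re)enabling legacy advertising, with
 * parameters derived from the current advertising instance flags and
 * the global connectable/discoverable settings.
 */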
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

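/* Append the local name to an advertising data buffer at @ptr, using
 * the complete name if it fits, otherwise the configured short name,
 * or a truncated copy of the complete name as a last resort. Returns
 * the new data length.
 */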
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

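/* Make @instance the current advertising instance and (re)arm its
 * expiry timer. Unless @force is set, re-scheduling the instance that
 * is already being advertised does not queue any HCI commands.
 */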
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

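/* Select and, if necessary, program the own address to use for the
 * request: an RPA when @use_rpa is set, a fresh non-resolvable private
 * address when privacy is required without RPAs, the static address if
 * one is configured or forced, and the public address otherwise.
 */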
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

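/* Queue a Write Scan Enable command reflecting the connectable and
 * discoverable settings, enabling page scan also while white-listed
 * BR/EDR devices are disconnected, and skipping the command when the
 * requested mode is already active.
 */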
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

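/* Queue the appropriate command to tear down @conn based on its state:
 * disconnect established links, cancel pending connection attempts and
 * reject incoming connection requests.
 */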
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

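/* For illustration only: the hand-off decision above, condensed into
 * a hypothetical helper. This mirrors le_scan_disable_work() and is a
 * sketch, not part of the real API; the DISCOV_TYPE_* values, flags
 * and quirk bit are the genuine symbols.
 */
#if 0
static bool example_should_stop_now(struct hci_dev *hdev)
{
	/* LE-only discovery: stopping the LE scan ends discovery. */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		return true;

	/* A controller that schedules both transports itself: stop
	 * only once the inquiry is done and no name resolution is
	 * pending.
	 */
	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
		return !test_bit(HCI_INQUIRY, &hdev->flags) &&
		       hdev->discovery.state != DISCOVERY_RESOLVING;

	/* Otherwise the host itself now runs the BR/EDR inquiry
	 * phase, which will stop discovery when it finishes.
	 */
	return false;
}
#endif
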
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, the le_scan_disable work was queued
	 * to run "duration" jiffies after scan_start. That work has been
	 * canceled during the restart, so queue it again with the
	 * remaining timeout to make sure the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

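/* Worked example of the remaining-timeout computation above (the
 * concrete numbers are made up, for illustration only). With
 * duration = 10240 jiffies:
 *
 *   no wraparound:   scan_start = 1000, now = 5000
 *                    elapsed = 5000 - 1000 = 4000
 *                    timeout = 10240 - 4000 = 6240 jiffies remain
 *
 *   jiffies wrapped: scan_start = ULONG_MAX - 100, now = 300
 *                    elapsed = ULONG_MAX - scan_start + now
 *                            = 100 + 300 = 400
 *                    timeout = 10240 - 400 = 9840 jiffies remain
 *
 * The unsigned subtraction "now - scan_start" in the guard also wraps
 * to a small value in the second case, so the branch computing a
 * non-zero timeout is still taken after a jiffies overflow.
 */
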
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If the controller is scanning, it means background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

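/* For illustration: LE scan interval and window are expressed in
 * units of 0.625 ms. Assuming the hci_core.h defaults of
 * DISCOV_LE_SCAN_INT = DISCOV_LE_SCAN_WIN = 0x12 (an assumption
 * about the surrounding headers, not verified for every kernel
 * version), the conversion works out as:
 *
 *   0x12 = 18  ->  18 * 0.625 ms = 11.25 ms
 *
 * With window == interval, discovery scans continuously. With the
 * doubled interval used for simultaneous discovery (see
 * start_discovery() below), the controller scans 11.25 ms out of
 * every 22.5 ms, leaving the other half of the radio time for BR/EDR
 * inquiry.
 */
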
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

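/* For illustration: how the two DISCOV_TYPE_INTERLEAVED paths above
 * differ in practice (a summary of the flow in this file, not a spec
 * quote):
 *
 *   simultaneous (controller-scheduled): a single hci_req_sync() call
 *     starts the LE scan and the BR/EDR inquiry together; the LE scan
 *     interval is doubled so the controller has idle radio time for
 *     inquiry, and one DISCOV_LE_TIMEOUT covers the whole phase.
 *
 *   host-interleaved: active_scan() runs alone first; when the
 *     le_scan_disable work fires after discov_interleaved_timeout,
 *     le_scan_disable_work() stops the LE scan and then kicks off
 *     bredr_inquiry(), so the two transports run back to back, each
 *     with its own timeout.
 */
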
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT,
			     &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

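/* For illustration only: a hypothetical sketch of how a caller kicks
 * the state machine above. The helper name is made up; the pattern of
 * setting the state and queueing discov_update mirrors the flow in
 * this file (an assumption about the caller side, which lives in the
 * mgmt code).
 */
#if 0
static void example_start_discovery(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	hci_dev_unlock(hdev);

	/* discov_update() runs from hdev->req_workqueue and moves the
	 * state on to DISCOVERY_FINDING, or back to DISCOVERY_STOPPED
	 * on failure.
	 */
	queue_work(hdev->req_workqueue, &hdev->discov_update);
}
#endif
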
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

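/* For illustration: when a dual-mode controller powers on with SSP
 * and LE enabled, powered_update_hci() may queue a batch roughly like
 * the following, which __hci_req_sync() then runs as one request.
 * Which commands actually appear depends on the current controller
 * state; this listing is an example, not a guarantee:
 *
 *   HCI_OP_WRITE_SSP_MODE          <- enable SSP in the controller
 *   HCI_OP_WRITE_SC_SUPPORT        <- if BR/EDR Secure Connections
 *   HCI_OP_WRITE_LE_HOST_SUPPORTED <- host LE support flags
 *   advertising data/enable        <- LE advertising defaults
 *   HCI_OP_WRITE_AUTH_ENABLE       <- link-level security toggle
 *   plus scan, class, name and EIR updates for BR/EDR
 */
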
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}

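/* For illustration only: a hypothetical sketch of where the two
 * functions above sit in a controller's lifetime. The surrounding
 * function name is made up; hci_request_setup() and
 * hci_request_cancel_all() are the real entry points, invoked from
 * the core hci_dev allocation and power-off/unregister paths (stated
 * here as an assumption about the caller side).
 */
#if 0
static void example_lifecycle(struct hci_dev *hdev)
{
	/* At device allocation: wire up all work items once. */
	hci_request_setup(hdev);

	/* ... device runs: discovery, scanning, advertising ... */

	/* At power-off/unregister: abort any pending synchronous
	 * request and make sure no work item is still running.
	 */
	hci_request_cancel_all(hdev);
}
#endif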