1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
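/* Hand a built request over to the controller: attach the completion
 * callback to the last queued command, splice the request's commands
 * onto hdev->cmd_q and schedule cmd_work to send them.
 */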
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
101
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114 }
115
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125 }
126
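/* Send a single HCI command and wait for its completion (or for the
 * given event). Returns the response skb on success, or an ERR_PTR on
 * failure, interruption or timeout.
 */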
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129 {
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186 {
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266 }
267
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 int ret;
273
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276	/* Check the state after obtaining the lock to protect HCI_UP
277 * against any races from hci_dev_do_close when the controller
278 * gets removed.
279 */
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 else
283 ret = -ENETDOWN;
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287 }
288
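/* Allocate an skb and fill in the HCI command header (opcode and
 * parameter length) followed by the parameters; the caller is
 * responsible for queueing it.
 */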
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291 {
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 skb_put_data(skb, param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313 }
314
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318 {
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341 bt_cb(skb)->hci.req_event = event;
342
343 skb_queue_tail(&req->cmd_q, skb);
344 }
345
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348 {
349 hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351
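/* Queue Write Page Scan Activity/Type commands to switch between
 * interlaced fast page scanning (160 msec interval) and the standard
 * 1.28 sec page scan, skipping commands whose values are already set.
 */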
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
371
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
374 }
375
376 acp.window = cpu_to_le16(0x0012);
377
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 sizeof(acp), &acp);
382
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385 }
386
387 /* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connections we start the background scanning,
389 * otherwise we stop it.
390 *
391 * This function requires the caller holds hdev->lock.
392 */
393 static void __hci_update_background_scan(struct hci_request *req)
394 {
395 struct hci_dev *hdev = req->hdev;
396
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
403 return;
404
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
407 return;
408
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
411 return;
412
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
415 *
416 * The Start Discovery and Start Service Discovery operations
417 * ensure to set proper values for RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
419 */
420 hci_discovery_filter_clear(hdev);
421
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there are no pending LE connections or devices
425 * to be scanned for, we should stop the background
426 * scanning.
427 */
428
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431 return;
432
433 hci_req_add_le_scan_disable(req);
434
435 BT_DBG("%s stopping background scanning", hdev->name);
436 } else {
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
439 */
440
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
443 * the same time.
444 */
445 if (hci_lookup_le_connect(hdev))
446 return;
447
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
450 */
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
453
454 hci_req_add_le_passive_scan(req);
455
456 BT_DBG("%s starting background scanning", hdev->name);
457 }
458 }
459
460 void __hci_req_update_name(struct hci_request *req)
461 {
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
464
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468 }
469
470 #define PNP_INFO_SVCLASS_ID 0x1200
471
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473 {
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
476
477 if (len < 4)
478 return ptr;
479
480 list_for_each_entry(uuid, &hdev->uuids, list) {
481 u16 uuid16;
482
483 if (uuid->size != 16)
484 continue;
485
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
487 if (uuid16 < 0x1100)
488 continue;
489
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
491 continue;
492
493 if (!uuids_start) {
494 uuids_start = ptr;
495 uuids_start[0] = 1;
496 uuids_start[1] = EIR_UUID16_ALL;
497 ptr += 2;
498 }
499
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
503 break;
504 }
505
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
509 }
510
511 return ptr;
512 }
513
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 {
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
518
519 if (len < 6)
520 return ptr;
521
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
524 continue;
525
526 if (!uuids_start) {
527 uuids_start = ptr;
528 uuids_start[0] = 1;
529 uuids_start[1] = EIR_UUID32_ALL;
530 ptr += 2;
531 }
532
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
536 break;
537 }
538
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
540 ptr += sizeof(u32);
541 uuids_start[0] += sizeof(u32);
542 }
543
544 return ptr;
545 }
546
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
548 {
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
551
552 if (len < 18)
553 return ptr;
554
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
557 continue;
558
559 if (!uuids_start) {
560 uuids_start = ptr;
561 uuids_start[0] = 1;
562 uuids_start[1] = EIR_UUID128_ALL;
563 ptr += 2;
564 }
565
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
569 break;
570 }
571
572 memcpy(ptr, uuid->uuid, 16);
573 ptr += 16;
574 uuids_start[0] += 16;
575 }
576
577 return ptr;
578 }
579
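/* Build the extended inquiry response data: local name (complete or
 * shortened), inquiry TX power, device ID and the 16/32/128-bit
 * service class UUID lists, limited to HCI_MAX_EIR_LENGTH.
 */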
580 static void create_eir(struct hci_dev *hdev, u8 *data)
581 {
582 u8 *ptr = data;
583 size_t name_len;
584
585 name_len = strlen(hdev->dev_name);
586
587 if (name_len > 0) {
588 /* EIR Data type */
589 if (name_len > 48) {
590 name_len = 48;
591 ptr[1] = EIR_NAME_SHORT;
592 } else
593 ptr[1] = EIR_NAME_COMPLETE;
594
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
597
598 memcpy(ptr + 2, hdev->dev_name, name_len);
599
600 ptr += (name_len + 2);
601 }
602
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
604 ptr[0] = 2;
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
607
608 ptr += 3;
609 }
610
611 if (hdev->devid_source > 0) {
612 ptr[0] = 9;
613 ptr[1] = EIR_DEVICE_ID;
614
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
619
620 ptr += 10;
621 }
622
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 }
627
628 void __hci_req_update_eir(struct hci_request *req)
629 {
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
632
633 if (!hdev_is_powered(hdev))
634 return;
635
636 if (!lmp_ext_inq_capable(hdev))
637 return;
638
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 return;
641
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 return;
644
645 memset(&cp, 0, sizeof(cp));
646
647 create_eir(hdev, cp.data);
648
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 return;
651
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
653
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655 }
656
657 void hci_req_add_le_scan_disable(struct hci_request *req)
658 {
659 struct hci_dev *hdev = req->hdev;
660
661 if (use_ext_scan(hdev)) {
662 struct hci_cp_le_set_ext_scan_enable cp;
663
664 memset(&cp, 0, sizeof(cp));
665 cp.enable = LE_SCAN_DISABLE;
666 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
667 &cp);
668 } else {
669 struct hci_cp_le_set_scan_enable cp;
670
671 memset(&cp, 0, sizeof(cp));
672 cp.enable = LE_SCAN_DISABLE;
673 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
674 }
675 }
676
677 static void add_to_white_list(struct hci_request *req,
678 struct hci_conn_params *params)
679 {
680 struct hci_cp_le_add_to_white_list cp;
681
682 cp.bdaddr_type = params->addr_type;
683 bacpy(&cp.bdaddr, &params->addr);
684
685 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
686 }
687
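/* Synchronize the controller white list with the pending connection
 * and pending report lists, and return the scan filter policy to use:
 * 0x01 when the white list can be used, 0x00 (accept all advertising)
 * when RPAs are involved or the white list would overflow.
 */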
688 static u8 update_white_list(struct hci_request *req)
689 {
690 struct hci_dev *hdev = req->hdev;
691 struct hci_conn_params *params;
692 struct bdaddr_list *b;
693 uint8_t white_list_entries = 0;
694
695 /* Go through the current white list programmed into the
696 * controller one by one and check if that address is still
697 * in the list of pending connections or list of devices to
698 * report. If not present in either list, then queue the
699 * command to remove it from the controller.
700 */
701 list_for_each_entry(b, &hdev->le_white_list, list) {
702 /* If the device is neither in pend_le_conns nor
703 * pend_le_reports then remove it from the whitelist.
704 */
705 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
706 &b->bdaddr, b->bdaddr_type) &&
707 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
708 &b->bdaddr, b->bdaddr_type)) {
709 struct hci_cp_le_del_from_white_list cp;
710
711 cp.bdaddr_type = b->bdaddr_type;
712 bacpy(&cp.bdaddr, &b->bdaddr);
713
714 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
715 sizeof(cp), &cp);
716 continue;
717 }
718
719 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
720 /* White list can not be used with RPAs */
721 return 0x00;
722 }
723
724 white_list_entries++;
725 }
726
727 /* Since all no longer valid white list entries have been
728 * removed, walk through the list of pending connections
729 * and ensure that any new device gets programmed into
730 * the controller.
731 *
732 * If the list of devices is larger than the number of
733 * available white list entries in the controller, then
734 * just abort and return a filter policy value that does
735 * not use the white list.
736 */
737 list_for_each_entry(params, &hdev->pend_le_conns, action) {
738 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
739 &params->addr, params->addr_type))
740 continue;
741
742 if (white_list_entries >= hdev->le_white_list_size) {
743 /* Select filter policy to accept all advertising */
744 return 0x00;
745 }
746
747 if (hci_find_irk_by_addr(hdev, &params->addr,
748 params->addr_type)) {
749 /* White list can not be used with RPAs */
750 return 0x00;
751 }
752
753 white_list_entries++;
754 add_to_white_list(req, params);
755 }
756
757 /* After adding all new pending connections, walk through
758 * the list of pending reports and also add these to the
759 * white list if there is still space.
760 */
761 list_for_each_entry(params, &hdev->pend_le_reports, action) {
762 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
763 &params->addr, params->addr_type))
764 continue;
765
766 if (white_list_entries >= hdev->le_white_list_size) {
767 /* Select filter policy to accept all advertising */
768 return 0x00;
769 }
770
771 if (hci_find_irk_by_addr(hdev, &params->addr,
772 params->addr_type)) {
773 /* White list can not be used with RPAs */
774 return 0x00;
775 }
776
777 white_list_entries++;
778 add_to_white_list(req, params);
779 }
780
781 /* Select filter policy to use white list */
782 return 0x01;
783 }
784
785 static bool scan_use_rpa(struct hci_dev *hdev)
786 {
787 return hci_dev_test_flag(hdev, HCI_PRIVACY);
788 }
789
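/* Queue the LE scan parameter and scan enable commands, using the
 * extended variants with per-PHY (1M/Coded) parameters when the
 * controller supports extended scanning.
 */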
790 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
791 u16 window, u8 own_addr_type, u8 filter_policy)
792 {
793 struct hci_dev *hdev = req->hdev;
794
795 /* Use extended scanning if the Set Extended Scan Parameters and
796 * Set Extended Scan Enable commands are supported.
797 */
798 if (use_ext_scan(hdev)) {
799 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
800 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
801 struct hci_cp_le_scan_phy_params *phy_params;
802 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
803 u32 plen;
804
805 ext_param_cp = (void *)data;
806 phy_params = (void *)ext_param_cp->data;
807
808 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
809 ext_param_cp->own_addr_type = own_addr_type;
810 ext_param_cp->filter_policy = filter_policy;
811
812 plen = sizeof(*ext_param_cp);
813
814 if (scan_1m(hdev) || scan_2m(hdev)) {
815 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
816
817 memset(phy_params, 0, sizeof(*phy_params));
818 phy_params->type = type;
819 phy_params->interval = cpu_to_le16(interval);
820 phy_params->window = cpu_to_le16(window);
821
822 plen += sizeof(*phy_params);
823 phy_params++;
824 }
825
826 if (scan_coded(hdev)) {
827 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
828
829 memset(phy_params, 0, sizeof(*phy_params));
830 phy_params->type = type;
831 phy_params->interval = cpu_to_le16(interval);
832 phy_params->window = cpu_to_le16(window);
833
834 plen += sizeof(*phy_params);
835 phy_params++;
836 }
837
838 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
839 plen, ext_param_cp);
840
841 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
842 ext_enable_cp.enable = LE_SCAN_ENABLE;
843 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
844
845 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
846 sizeof(ext_enable_cp), &ext_enable_cp);
847 } else {
848 struct hci_cp_le_set_scan_param param_cp;
849 struct hci_cp_le_set_scan_enable enable_cp;
850
851 memset(&param_cp, 0, sizeof(param_cp));
852 param_cp.type = type;
853 param_cp.interval = cpu_to_le16(interval);
854 param_cp.window = cpu_to_le16(window);
855 param_cp.own_address_type = own_addr_type;
856 param_cp.filter_policy = filter_policy;
857 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
858 &param_cp);
859
860 memset(&enable_cp, 0, sizeof(enable_cp));
861 enable_cp.enable = LE_SCAN_ENABLE;
862 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
863 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
864 &enable_cp);
865 }
866 }
867
868 void hci_req_add_le_passive_scan(struct hci_request *req)
869 {
870 struct hci_dev *hdev = req->hdev;
871 u8 own_addr_type;
872 u8 filter_policy;
873
874 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
875 * during passive scanning. Not using a non-resolvable address
876 * here is important so that peer devices using direct
877 * advertising with our address will be correctly reported
878 * by the controller.
879 */
880 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
881 &own_addr_type))
882 return;
883
884 /* Adding or removing entries from the white list must
885 * happen before enabling scanning. The controller does
886 * not allow white list modification while scanning.
887 */
888 filter_policy = update_white_list(req);
889
890 /* When the controller is using random resolvable addresses and
891 * has LE privacy enabled, controllers that support Extended
892 * Scanner Filter Policies can also handle directed
893 * advertising.
894 *
895 * So instead of using filter policies 0x00 (no whitelist)
896 * and 0x01 (whitelist enabled) use the new filter policies
897 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
898 */
899 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
900 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
901 filter_policy |= 0x02;
902
903 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
904 hdev->le_scan_window, own_addr_type, filter_policy);
905 }
906
907 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
908 {
909 struct adv_info *adv_instance;
910
911 /* Ignore instance 0 */
912 if (instance == 0x00)
913 return 0;
914
915 adv_instance = hci_find_adv_instance(hdev, instance);
916 if (!adv_instance)
917 return 0;
918
919 /* TODO: Take into account the "appearance" and "local-name" flags here.
920 * These are currently being ignored as they are not supported.
921 */
922 return adv_instance->scan_rsp_len;
923 }
924
925 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
926 {
927 u8 instance = hdev->cur_adv_instance;
928 struct adv_info *adv_instance;
929
930 /* Ignore instance 0 */
931 if (instance == 0x00)
932 return 0;
933
934 adv_instance = hci_find_adv_instance(hdev, instance);
935 if (!adv_instance)
936 return 0;
937
938 /* TODO: Take into account the "appearance" and "local-name" flags here.
939 * These are currently being ignored as they are not supported.
940 */
941 return adv_instance->scan_rsp_len;
942 }
943
944 void __hci_req_disable_advertising(struct hci_request *req)
945 {
946 if (ext_adv_capable(req->hdev)) {
947 struct hci_cp_le_set_ext_adv_enable cp;
948
949 cp.enable = 0x00;
950 /* Disable all sets since we only support one set at the moment */
951 cp.num_of_sets = 0x00;
952
953 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
954 } else {
955 u8 enable = 0x00;
956
957 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
958 }
959 }
960
961 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
962 {
963 u32 flags;
964 struct adv_info *adv_instance;
965
966 if (instance == 0x00) {
967 /* Instance 0 always manages the "Tx Power" and "Flags"
968 * fields
969 */
970 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
971
972 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
973 * corresponds to the "connectable" instance flag.
974 */
975 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
976 flags |= MGMT_ADV_FLAG_CONNECTABLE;
977
978 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
980 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
981 flags |= MGMT_ADV_FLAG_DISCOV;
982
983 return flags;
984 }
985
986 adv_instance = hci_find_adv_instance(hdev, instance);
987
988 /* Return 0 when we got an invalid instance identifier. */
989 if (!adv_instance)
990 return 0;
991
992 return adv_instance->flags;
993 }
994
995 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
996 {
997 /* If privacy is not enabled don't use RPA */
998 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
999 return false;
1000
1001 /* If basic privacy mode is enabled use RPA */
1002 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1003 return true;
1004
1005 /* If limited privacy mode is enabled don't use RPA if we're
1006 * both discoverable and bondable.
1007 */
1008 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1009 hci_dev_test_flag(hdev, HCI_BONDABLE))
1010 return false;
1011
1012 /* We're neither bondable nor discoverable in the limited
1013 * privacy mode, therefore use RPA.
1014 */
1015 return true;
1016 }
1017
1018 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1019 {
1020 /* If there is no connection we are OK to advertise. */
1021 if (hci_conn_num(hdev, LE_LINK) == 0)
1022 return true;
1023
1024 /* Check le_states if there is any connection in slave role. */
1025 if (hdev->conn_hash.le_num_slave > 0) {
1026 /* Slave connection state and non connectable mode bit 20. */
1027 if (!connectable && !(hdev->le_states[2] & 0x10))
1028 return false;
1029
1030 /* Slave connection state and connectable mode bit 38
1031 * and scannable bit 21.
1032 */
1033 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1034 !(hdev->le_states[2] & 0x20)))
1035 return false;
1036 }
1037
1038 /* Check le_states if there is any connection in master role. */
1039 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1040 /* Master connection state and non connectable mode bit 18. */
1041 if (!connectable && !(hdev->le_states[2] & 0x02))
1042 return false;
1043
1044 /* Master connection state and connectable mode bit 35 and
1045 * scannable 19.
1046 */
1047 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1048 !(hdev->le_states[2] & 0x08)))
1049 return false;
1050 }
1051
1052 return true;
1053 }
1054
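/* Queue the legacy Set Advertising Parameters and Set Advertising
 * Enable commands. The advertising type is ADV_IND when connectable,
 * ADV_SCAN_IND when scan response data is present, and
 * ADV_NONCONN_IND otherwise.
 */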
1055 void __hci_req_enable_advertising(struct hci_request *req)
1056 {
1057 struct hci_dev *hdev = req->hdev;
1058 struct hci_cp_le_set_adv_param cp;
1059 u8 own_addr_type, enable = 0x01;
1060 bool connectable;
1061 u32 flags;
1062
1063 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1064
1065 /* If the "connectable" instance flag was not set, then choose between
1066 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1067 */
1068 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1069 mgmt_get_connectable(hdev);
1070
1071 if (!is_advertising_allowed(hdev, connectable))
1072 return;
1073
1074 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1075 __hci_req_disable_advertising(req);
1076
1077 /* Clear the HCI_LE_ADV bit temporarily so that the
1078 * hci_update_random_address knows that it's safe to go ahead
1079 * and write a new random address. The flag will be set back on
1080 * as soon as the SET_ADV_ENABLE HCI command completes.
1081 */
1082 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1083
1084 /* Set require_privacy to true only when non-connectable
1085 * advertising is used. In that case it is fine to use a
1086 * non-resolvable private address.
1087 */
1088 if (hci_update_random_address(req, !connectable,
1089 adv_use_rpa(hdev, flags),
1090 &own_addr_type) < 0)
1091 return;
1092
1093 memset(&cp, 0, sizeof(cp));
1094 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1095 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1096
1097 if (connectable)
1098 cp.type = LE_ADV_IND;
1099 else if (get_cur_adv_instance_scan_rsp_len(hdev))
1100 cp.type = LE_ADV_SCAN_IND;
1101 else
1102 cp.type = LE_ADV_NONCONN_IND;
1103
1104 cp.own_address_type = own_addr_type;
1105 cp.channel_map = hdev->le_adv_channel_map;
1106
1107 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1108
1109 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1110 }
1111
1112 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1113 {
1114 size_t short_len;
1115 size_t complete_len;
1116
1117 /* no space left for name (+ NULL + type + len) */
1118 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1119 return ad_len;
1120
1121 /* use complete name if present and fits */
1122 complete_len = strlen(hdev->dev_name);
1123 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1124 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1125 hdev->dev_name, complete_len + 1);
1126
1127 /* use short name if present */
1128 short_len = strlen(hdev->short_name);
1129 if (short_len)
1130 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1131 hdev->short_name, short_len + 1);
1132
1133 /* use shortened full name if present, we already know that name
1134 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1135 */
1136 if (complete_len) {
1137 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1138
1139 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1140 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1141
1142 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1143 sizeof(name));
1144 }
1145
1146 return ad_len;
1147 }
1148
1149 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1150 {
1151 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1152 }
1153
1154 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1155 {
1156 u8 scan_rsp_len = 0;
1157
1158 if (hdev->appearance) {
1159 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1160 }
1161
1162 return append_local_name(hdev, ptr, scan_rsp_len);
1163 }
1164
1165 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1166 u8 *ptr)
1167 {
1168 struct adv_info *adv_instance;
1169 u32 instance_flags;
1170 u8 scan_rsp_len = 0;
1171
1172 adv_instance = hci_find_adv_instance(hdev, instance);
1173 if (!adv_instance)
1174 return 0;
1175
1176 instance_flags = adv_instance->flags;
1177
1178 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1179 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1180 }
1181
1182 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1183 adv_instance->scan_rsp_len);
1184
1185 scan_rsp_len += adv_instance->scan_rsp_len;
1186
1187 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1188 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1189
1190 return scan_rsp_len;
1191 }
1192
1193 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1194 {
1195 struct hci_dev *hdev = req->hdev;
1196 u8 len;
1197
1198 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1199 return;
1200
1201 if (ext_adv_capable(hdev)) {
1202 struct hci_cp_le_set_ext_scan_rsp_data cp;
1203
1204 memset(&cp, 0, sizeof(cp));
1205
1206 if (instance)
1207 len = create_instance_scan_rsp_data(hdev, instance,
1208 cp.data);
1209 else
1210 len = create_default_scan_rsp_data(hdev, cp.data);
1211
1212 if (hdev->scan_rsp_data_len == len &&
1213 !memcmp(cp.data, hdev->scan_rsp_data, len))
1214 return;
1215
1216 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1217 hdev->scan_rsp_data_len = len;
1218
1219 cp.handle = 0;
1220 cp.length = len;
1221 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1222 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1223
1224 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1225 &cp);
1226 } else {
1227 struct hci_cp_le_set_scan_rsp_data cp;
1228
1229 memset(&cp, 0, sizeof(cp));
1230
1231 if (instance)
1232 len = create_instance_scan_rsp_data(hdev, instance,
1233 cp.data);
1234 else
1235 len = create_default_scan_rsp_data(hdev, cp.data);
1236
1237 if (hdev->scan_rsp_data_len == len &&
1238 !memcmp(cp.data, hdev->scan_rsp_data, len))
1239 return;
1240
1241 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1242 hdev->scan_rsp_data_len = len;
1243
1244 cp.length = len;
1245
1246 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1247 }
1248 }
1249
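/* Assemble the advertising data for an instance: the Flags field
 * (unless the instance data already carries one), the instance's own
 * advertising data and, when requested, the TX power. Returns the
 * resulting length.
 */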
1250 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1251 {
1252 struct adv_info *adv_instance = NULL;
1253 u8 ad_len = 0, flags = 0;
1254 u32 instance_flags;
1255
1256 /* Return 0 when the current instance identifier is invalid. */
1257 if (instance) {
1258 adv_instance = hci_find_adv_instance(hdev, instance);
1259 if (!adv_instance)
1260 return 0;
1261 }
1262
1263 instance_flags = get_adv_instance_flags(hdev, instance);
1264
1265 /* If instance already has the flags set skip adding it once
1266 * again.
1267 */
1268 if (adv_instance && eir_get_data(adv_instance->adv_data,
1269 adv_instance->adv_data_len, EIR_FLAGS,
1270 NULL))
1271 goto skip_flags;
1272
1273 /* The Add Advertising command allows userspace to set both the general
1274 * and limited discoverable flags.
1275 */
1276 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1277 flags |= LE_AD_GENERAL;
1278
1279 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1280 flags |= LE_AD_LIMITED;
1281
1282 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1283 flags |= LE_AD_NO_BREDR;
1284
1285 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1286 /* If a discovery flag wasn't provided, simply use the global
1287 * settings.
1288 */
1289 if (!flags)
1290 flags |= mgmt_get_adv_discov_flags(hdev);
1291
1292 /* If flags would still be empty, then there is no need to
1293 * include the "Flags" AD field.
1294 */
1295 if (flags) {
1296 ptr[0] = 0x02;
1297 ptr[1] = EIR_FLAGS;
1298 ptr[2] = flags;
1299
1300 ad_len += 3;
1301 ptr += 3;
1302 }
1303 }
1304
1305 skip_flags:
1306 if (adv_instance) {
1307 memcpy(ptr, adv_instance->adv_data,
1308 adv_instance->adv_data_len);
1309 ad_len += adv_instance->adv_data_len;
1310 ptr += adv_instance->adv_data_len;
1311 }
1312
1313 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1314 s8 adv_tx_power;
1315
1316 if (ext_adv_capable(hdev)) {
1317 if (adv_instance)
1318 adv_tx_power = adv_instance->tx_power;
1319 else
1320 adv_tx_power = hdev->adv_tx_power;
1321 } else {
1322 adv_tx_power = hdev->adv_tx_power;
1323 }
1324
1325 /* Provide Tx Power only if we can provide a valid value for it */
1326 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1327 ptr[0] = 0x02;
1328 ptr[1] = EIR_TX_POWER;
1329 ptr[2] = (u8)adv_tx_power;
1330
1331 ad_len += 3;
1332 ptr += 3;
1333 }
1334 }
1335
1336 return ad_len;
1337 }
1338
1339 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1340 {
1341 struct hci_dev *hdev = req->hdev;
1342 u8 len;
1343
1344 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1345 return;
1346
1347 if (ext_adv_capable(hdev)) {
1348 struct hci_cp_le_set_ext_adv_data cp;
1349
1350 memset(&cp, 0, sizeof(cp));
1351
1352 len = create_instance_adv_data(hdev, instance, cp.data);
1353
1354 /* There's nothing to do if the data hasn't changed */
1355 if (hdev->adv_data_len == len &&
1356 memcmp(cp.data, hdev->adv_data, len) == 0)
1357 return;
1358
1359 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1360 hdev->adv_data_len = len;
1361
1362 cp.length = len;
1363 cp.handle = 0;
1364 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1365 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1366
1367 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1368 } else {
1369 struct hci_cp_le_set_adv_data cp;
1370
1371 memset(&cp, 0, sizeof(cp));
1372
1373 len = create_instance_adv_data(hdev, instance, cp.data);
1374
1375 /* There's nothing to do if the data hasn't changed */
1376 if (hdev->adv_data_len == len &&
1377 memcmp(cp.data, hdev->adv_data, len) == 0)
1378 return;
1379
1380 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1381 hdev->adv_data_len = len;
1382
1383 cp.length = len;
1384
1385 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1386 }
1387 }
1388
1389 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1390 {
1391 struct hci_request req;
1392
1393 hci_req_init(&req, hdev);
1394 __hci_req_update_adv_data(&req, instance);
1395
1396 return hci_req_run(&req, NULL);
1397 }
1398
1399 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1400 {
1401 BT_DBG("%s status %u", hdev->name, status);
1402 }
1403
1404 void hci_req_reenable_advertising(struct hci_dev *hdev)
1405 {
1406 struct hci_request req;
1407
1408 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1409 list_empty(&hdev->adv_instances))
1410 return;
1411
1412 hci_req_init(&req, hdev);
1413
1414 if (hdev->cur_adv_instance) {
1415 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1416 true);
1417 } else {
1418 if (ext_adv_capable(hdev)) {
1419 __hci_req_start_ext_adv(&req, 0x00);
1420 } else {
1421 __hci_req_update_adv_data(&req, 0x00);
1422 __hci_req_update_scan_rsp_data(&req, 0x00);
1423 __hci_req_enable_advertising(&req);
1424 }
1425 }
1426
1427 hci_req_run(&req, adv_enable_complete);
1428 }
1429
1430 static void adv_timeout_expire(struct work_struct *work)
1431 {
1432 struct hci_dev *hdev = container_of(work, struct hci_dev,
1433 adv_instance_expire.work);
1434
1435 struct hci_request req;
1436 u8 instance;
1437
1438 BT_DBG("%s", hdev->name);
1439
1440 hci_dev_lock(hdev);
1441
1442 hdev->adv_instance_timeout = 0;
1443
1444 instance = hdev->cur_adv_instance;
1445 if (instance == 0x00)
1446 goto unlock;
1447
1448 hci_req_init(&req, hdev);
1449
1450 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1451
1452 if (list_empty(&hdev->adv_instances))
1453 __hci_req_disable_advertising(&req);
1454
1455 hci_req_run(&req, NULL);
1456
1457 unlock:
1458 hci_dev_unlock(hdev);
1459 }
1460
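/* Choose the own address type and random address for (extended)
 * advertising: a resolvable private address when privacy allows it, a
 * non-resolvable private address when privacy is required without an
 * RPA, and the public address otherwise. Also schedules RPA expiry
 * when an RPA is used.
 */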
1461 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1462 bool use_rpa, struct adv_info *adv_instance,
1463 u8 *own_addr_type, bdaddr_t *rand_addr)
1464 {
1465 int err;
1466
1467 bacpy(rand_addr, BDADDR_ANY);
1468
1469 /* If privacy is enabled use a resolvable private address. If
1470 * current RPA has expired then generate a new one.
1471 */
1472 if (use_rpa) {
1473 int to;
1474
1475 *own_addr_type = ADDR_LE_DEV_RANDOM;
1476
1477 if (adv_instance) {
1478 if (!adv_instance->rpa_expired &&
1479 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1480 return 0;
1481
1482 adv_instance->rpa_expired = false;
1483 } else {
1484 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1485 !bacmp(&hdev->random_addr, &hdev->rpa))
1486 return 0;
1487 }
1488
1489 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1490 if (err < 0) {
1491 BT_ERR("%s failed to generate new RPA", hdev->name);
1492 return err;
1493 }
1494
1495 bacpy(rand_addr, &hdev->rpa);
1496
1497 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1498 if (adv_instance)
1499 queue_delayed_work(hdev->workqueue,
1500 &adv_instance->rpa_expired_cb, to);
1501 else
1502 queue_delayed_work(hdev->workqueue,
1503 &hdev->rpa_expired, to);
1504
1505 return 0;
1506 }
1507
1508 /* In case of required privacy without resolvable private address,
1509 * use a non-resolvable private address. This is useful for
1510 * non-connectable advertising.
1511 */
1512 if (require_privacy) {
1513 bdaddr_t nrpa;
1514
1515 while (true) {
1516 /* The non-resolvable private address is generated
1517 * from random six bytes with the two most significant
1518 * bits cleared.
1519 */
1520 get_random_bytes(&nrpa, 6);
1521 nrpa.b[5] &= 0x3f;
1522
1523 /* The non-resolvable private address shall not be
1524 * equal to the public address.
1525 */
1526 if (bacmp(&hdev->bdaddr, &nrpa))
1527 break;
1528 }
1529
1530 *own_addr_type = ADDR_LE_DEV_RANDOM;
1531 bacpy(rand_addr, &nrpa);
1532
1533 return 0;
1534 }
1535
1536 /* No privacy so use a public address. */
1537 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1538
1539 return 0;
1540 }
1541
1542 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1543 {
1544 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1545 }
1546
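/* Queue the Set Extended Advertising Parameters command for the given
 * instance (and the per-set random address command when the address
 * needs updating). Event properties and PHYs are derived from the
 * instance flags.
 */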
1547 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1548 {
1549 struct hci_cp_le_set_ext_adv_params cp;
1550 struct hci_dev *hdev = req->hdev;
1551 bool connectable;
1552 u32 flags;
1553 bdaddr_t random_addr;
1554 u8 own_addr_type;
1555 int err;
1556 struct adv_info *adv_instance;
1557 bool secondary_adv;
1558 /* In ext adv set param interval is 3 octets */
1559 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1560
1561 if (instance > 0) {
1562 adv_instance = hci_find_adv_instance(hdev, instance);
1563 if (!adv_instance)
1564 return -EINVAL;
1565 } else {
1566 adv_instance = NULL;
1567 }
1568
1569 flags = get_adv_instance_flags(hdev, instance);
1570
1571 /* If the "connectable" instance flag was not set, then choose between
1572 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1573 */
1574 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1575 mgmt_get_connectable(hdev);
1576
1577 if (!is_advertising_allowed(hdev, connectable))
1578 return -EPERM;
1579
1580 /* Set require_privacy to true only when non-connectable
1581 * advertising is used. In that case it is fine to use a
1582 * non-resolvable private address.
1583 */
1584 err = hci_get_random_address(hdev, !connectable,
1585 adv_use_rpa(hdev, flags), adv_instance,
1586 &own_addr_type, &random_addr);
1587 if (err < 0)
1588 return err;
1589
1590 memset(&cp, 0, sizeof(cp));
1591
1592 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1593 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1594
1595 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1596
1597 if (connectable) {
1598 if (secondary_adv)
1599 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1600 else
1601 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1602 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1603 if (secondary_adv)
1604 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1605 else
1606 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1607 } else {
1608 if (secondary_adv)
1609 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1610 else
1611 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1612 }
1613
1614 cp.own_addr_type = own_addr_type;
1615 cp.channel_map = hdev->le_adv_channel_map;
1616 cp.tx_power = 127;
1617 cp.handle = 0;
1618
1619 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1620 cp.primary_phy = HCI_ADV_PHY_1M;
1621 cp.secondary_phy = HCI_ADV_PHY_2M;
1622 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1623 cp.primary_phy = HCI_ADV_PHY_CODED;
1624 cp.secondary_phy = HCI_ADV_PHY_CODED;
1625 } else {
1626 /* In all other cases use 1M */
1627 cp.primary_phy = HCI_ADV_PHY_1M;
1628 cp.secondary_phy = HCI_ADV_PHY_1M;
1629 }
1630
1631 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1632
1633 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1634 bacmp(&random_addr, BDADDR_ANY)) {
1635 struct hci_cp_le_set_adv_set_rand_addr cp;
1636
1637 /* Check if the random address needs to be updated */
1638 if (adv_instance) {
1639 if (!bacmp(&random_addr, &adv_instance->random_addr))
1640 return 0;
1641 } else {
1642 if (!bacmp(&random_addr, &hdev->random_addr))
1643 return 0;
1644 }
1645
1646 memset(&cp, 0, sizeof(cp));
1647
1648 cp.handle = 0;
1649 bacpy(&cp.bdaddr, &random_addr);
1650
1651 hci_req_add(req,
1652 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1653 sizeof(cp), &cp);
1654 }
1655
1656 return 0;
1657 }
1658
1659 void __hci_req_enable_ext_advertising(struct hci_request *req)
1660 {
1661 struct hci_cp_le_set_ext_adv_enable *cp;
1662 struct hci_cp_ext_adv_set *adv_set;
1663 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1664
1665 cp = (void *) data;
1666 adv_set = (void *) cp->data;
1667
1668 memset(cp, 0, sizeof(*cp));
1669
1670 cp->enable = 0x01;
1671 cp->num_of_sets = 0x01;
1672
1673 memset(adv_set, 0, sizeof(*adv_set));
1674
1675 adv_set->handle = 0;
1676
1677 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1678 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1679 data);
1680 }
1681
1682 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1683 {
1684 struct hci_dev *hdev = req->hdev;
1685 int err;
1686
1687 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1688 __hci_req_disable_advertising(req);
1689
1690 err = __hci_req_setup_ext_adv_instance(req, instance);
1691 if (err < 0)
1692 return err;
1693
1694 __hci_req_update_scan_rsp_data(req, instance);
1695 __hci_req_enable_ext_advertising(req);
1696
1697 return 0;
1698 }
1699
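/* Make the given instance the current advertising instance, arm the
 * instance timeout, and program and enable advertising unless the same
 * instance is already being advertised.
 */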
1700 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1701 bool force)
1702 {
1703 struct hci_dev *hdev = req->hdev;
1704 struct adv_info *adv_instance = NULL;
1705 u16 timeout;
1706
1707 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1708 list_empty(&hdev->adv_instances))
1709 return -EPERM;
1710
1711 if (hdev->adv_instance_timeout)
1712 return -EBUSY;
1713
1714 adv_instance = hci_find_adv_instance(hdev, instance);
1715 if (!adv_instance)
1716 return -ENOENT;
1717
1718 /* A zero timeout means unlimited advertising. As long as there is
1719 * only one instance, duration should be ignored. We still set a timeout
1720 * in case further instances are being added later on.
1721 *
1722 * If the remaining lifetime of the instance is more than the duration
1723 * then the timeout corresponds to the duration, otherwise it will be
1724 * reduced to the remaining instance lifetime.
1725 */
1726 if (adv_instance->timeout == 0 ||
1727 adv_instance->duration <= adv_instance->remaining_time)
1728 timeout = adv_instance->duration;
1729 else
1730 timeout = adv_instance->remaining_time;
1731
1732 /* The remaining time is being reduced unless the instance is being
1733 * advertised without time limit.
1734 */
1735 if (adv_instance->timeout)
1736 adv_instance->remaining_time =
1737 adv_instance->remaining_time - timeout;
1738
1739 hdev->adv_instance_timeout = timeout;
1740 queue_delayed_work(hdev->req_workqueue,
1741 &hdev->adv_instance_expire,
1742 msecs_to_jiffies(timeout * 1000));
1743
1744 /* If we're just re-scheduling the same instance again then do not
1745 * execute any HCI commands. This happens when a single instance is
1746 * being advertised.
1747 */
1748 if (!force && hdev->cur_adv_instance == instance &&
1749 hci_dev_test_flag(hdev, HCI_LE_ADV))
1750 return 0;
1751
1752 hdev->cur_adv_instance = instance;
1753 if (ext_adv_capable(hdev)) {
1754 __hci_req_start_ext_adv(req, instance);
1755 } else {
1756 __hci_req_update_adv_data(req, instance);
1757 __hci_req_update_scan_rsp_data(req, instance);
1758 __hci_req_enable_advertising(req);
1759 }
1760
1761 return 0;
1762 }
1763
1764 static void cancel_adv_timeout(struct hci_dev *hdev)
1765 {
1766 if (hdev->adv_instance_timeout) {
1767 hdev->adv_instance_timeout = 0;
1768 cancel_delayed_work(&hdev->adv_instance_expire);
1769 }
1770 }
1771
1772 /* For a single instance:
1773 * - force == true: The instance will be removed even when its remaining
1774 * lifetime is not zero.
1775 * - force == false: the instance will be deactivated but kept stored unless
1776 * the remaining lifetime is zero.
1777 *
1778 * For instance == 0x00:
1779 * - force == true: All instances will be removed regardless of their timeout
1780 * setting.
1781 * - force == false: Only instances that have a timeout will be removed.
1782 */
1783 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1784 struct hci_request *req, u8 instance,
1785 bool force)
1786 {
1787 struct adv_info *adv_instance, *n, *next_instance = NULL;
1788 int err;
1789 u8 rem_inst;
1790
1791 /* Cancel any timeout concerning the removed instance(s). */
1792 if (!instance || hdev->cur_adv_instance == instance)
1793 cancel_adv_timeout(hdev);
1794
1795 /* Get the next instance to advertise BEFORE we remove
1796 * the current one. This can be the same instance again
1797 * if there is only one instance.
1798 */
1799 if (instance && hdev->cur_adv_instance == instance)
1800 next_instance = hci_get_next_instance(hdev, instance);
1801
1802 if (instance == 0x00) {
1803 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1804 list) {
1805 if (!(force || adv_instance->timeout))
1806 continue;
1807
1808 rem_inst = adv_instance->instance;
1809 err = hci_remove_adv_instance(hdev, rem_inst);
1810 if (!err)
1811 mgmt_advertising_removed(sk, hdev, rem_inst);
1812 }
1813 } else {
1814 adv_instance = hci_find_adv_instance(hdev, instance);
1815
1816 if (force || (adv_instance && adv_instance->timeout &&
1817 !adv_instance->remaining_time)) {
1818 /* Don't advertise a removed instance. */
1819 if (next_instance &&
1820 next_instance->instance == instance)
1821 next_instance = NULL;
1822
1823 err = hci_remove_adv_instance(hdev, instance);
1824 if (!err)
1825 mgmt_advertising_removed(sk, hdev, instance);
1826 }
1827 }
1828
1829 if (!req || !hdev_is_powered(hdev) ||
1830 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1831 return;
1832
1833 if (next_instance)
1834 __hci_req_schedule_adv_instance(req, next_instance->instance,
1835 false);
1836 }
1837
1838 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1839 {
1840 struct hci_dev *hdev = req->hdev;
1841
1842 /* If we're advertising or initiating an LE connection we can't
1843 * go ahead and change the random address at this time. This is
1844 * because the eventual initiator address used for the
1845 * subsequently created connection will be undefined (some
1846 * controllers use the new address and others the one we had
1847 * when the operation started).
1848 *
1849 * In this kind of scenario skip the update and let the random
1850 * address be updated at the next cycle.
1851 */
1852 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1853 hci_lookup_le_connect(hdev)) {
1854 BT_DBG("Deferring random address update");
1855 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1856 return;
1857 }
1858
1859 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1860 }
1861
1862 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1863 bool use_rpa, u8 *own_addr_type)
1864 {
1865 struct hci_dev *hdev = req->hdev;
1866 int err;
1867
1868 /* If privacy is enabled use a resolvable private address. If
1869  * current RPA has expired or something other than the
1870  * current RPA is in use, then generate a new one.
1871 */
1872 if (use_rpa) {
1873 int to;
1874
1875 *own_addr_type = ADDR_LE_DEV_RANDOM;
1876
1877 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1878 !bacmp(&hdev->random_addr, &hdev->rpa))
1879 return 0;
1880
1881 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1882 if (err < 0) {
1883 bt_dev_err(hdev, "failed to generate new RPA");
1884 return err;
1885 }
1886
1887 set_random_addr(req, &hdev->rpa);
1888
1889 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1890 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1891
1892 return 0;
1893 }
1894
1895 /* In case of required privacy without resolvable private address,
1896  * use a non-resolvable private address. This is useful for active
1897 * scanning and non-connectable advertising.
1898 */
1899 if (require_privacy) {
1900 bdaddr_t nrpa;
1901
1902 while (true) {
1903 /* The non-resolvable private address is generated
1904 			 * from six random bytes with the two most significant
1905 * bits cleared.
1906 */
1907 get_random_bytes(&nrpa, 6);
1908 nrpa.b[5] &= 0x3f;
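			/* bdaddr_t stores the address least-significant byte
			 * first, so b[5] is the most significant byte; masking
			 * it with 0x3f clears the two top bits, which marks
			 * the address as a non-resolvable private address.
			 */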
1909
1910 /* The non-resolvable private address shall not be
1911 * equal to the public address.
1912 */
1913 if (bacmp(&hdev->bdaddr, &nrpa))
1914 break;
1915 }
1916
1917 *own_addr_type = ADDR_LE_DEV_RANDOM;
1918 set_random_addr(req, &nrpa);
1919 return 0;
1920 }
1921
1922 /* If forcing static address is in use or there is no public
1923 	 * address, use the static address as the random address (but skip
1924 	 * the HCI command if the current random address is already the
1925 	 * static one).
1926 *
1927 * In case BR/EDR has been disabled on a dual-mode controller
1928 * and a static address has been configured, then use that
1929 * address instead of the public BR/EDR address.
1930 */
1931 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1932 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1933 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1934 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1935 *own_addr_type = ADDR_LE_DEV_RANDOM;
1936 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1937 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1938 &hdev->static_addr);
1939 return 0;
1940 }
1941
1942 /* Neither privacy nor static address is being used so use a
1943 * public address.
1944 */
1945 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1946
1947 return 0;
1948 }
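/* To summarize the selection above: a resolvable private address when
 * use_rpa is set, a freshly generated non-resolvable private address when
 * only require_privacy is set, the static address when it is forced or no
 * public address exists, and the public address otherwise.
 *
 * Illustrative usage sketch (mirrors active_scan() later in this file, not
 * an addition to the code):
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */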
1949
1950 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1951 {
1952 struct bdaddr_list *b;
1953
1954 list_for_each_entry(b, &hdev->whitelist, list) {
1955 struct hci_conn *conn;
1956
1957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1958 if (!conn)
1959 return true;
1960
1961 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1962 return true;
1963 }
1964
1965 return false;
1966 }
1967
1968 void __hci_req_update_scan(struct hci_request *req)
1969 {
1970 struct hci_dev *hdev = req->hdev;
1971 u8 scan;
1972
1973 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1974 return;
1975
1976 if (!hdev_is_powered(hdev))
1977 return;
1978
1979 if (mgmt_powering_down(hdev))
1980 return;
1981
1982 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1983 disconnected_whitelist_entries(hdev))
1984 scan = SCAN_PAGE;
1985 else
1986 scan = SCAN_DISABLED;
1987
1988 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1989 scan |= SCAN_INQUIRY;
1990
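	/* Skip the Write Scan Enable command when the controller's current
	 * page scan and inquiry scan state already match the value computed
	 * above.
	 */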
1991 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1992 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1993 return;
1994
1995 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1996 }
1997
1998 static int update_scan(struct hci_request *req, unsigned long opt)
1999 {
2000 hci_dev_lock(req->hdev);
2001 __hci_req_update_scan(req);
2002 hci_dev_unlock(req->hdev);
2003 return 0;
2004 }
2005
2006 static void scan_update_work(struct work_struct *work)
2007 {
2008 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2009
2010 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2011 }
2012
2013 static int connectable_update(struct hci_request *req, unsigned long opt)
2014 {
2015 struct hci_dev *hdev = req->hdev;
2016
2017 hci_dev_lock(hdev);
2018
2019 __hci_req_update_scan(req);
2020
2021 /* If BR/EDR is not enabled and we disable advertising as a
2022 * by-product of disabling connectable, we need to update the
2023 * advertising flags.
2024 */
2025 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2026 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2027
2028 /* Update the advertising parameters if necessary */
2029 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2030 !list_empty(&hdev->adv_instances)) {
2031 if (ext_adv_capable(hdev))
2032 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2033 else
2034 __hci_req_enable_advertising(req);
2035 }
2036
2037 __hci_update_background_scan(req);
2038
2039 hci_dev_unlock(hdev);
2040
2041 return 0;
2042 }
2043
2044 static void connectable_update_work(struct work_struct *work)
2045 {
2046 struct hci_dev *hdev = container_of(work, struct hci_dev,
2047 connectable_update);
2048 u8 status;
2049
2050 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2051 mgmt_set_connectable_complete(hdev, status);
2052 }
2053
2054 static u8 get_service_classes(struct hci_dev *hdev)
2055 {
2056 struct bt_uuid *uuid;
2057 u8 val = 0;
2058
2059 list_for_each_entry(uuid, &hdev->uuids, list)
2060 val |= uuid->svc_hint;
2061
2062 return val;
2063 }
2064
2065 void __hci_req_update_class(struct hci_request *req)
2066 {
2067 struct hci_dev *hdev = req->hdev;
2068 u8 cod[3];
2069
2070 BT_DBG("%s", hdev->name);
2071
2072 if (!hdev_is_powered(hdev))
2073 return;
2074
2075 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2076 return;
2077
2078 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2079 return;
2080
2081 cod[0] = hdev->minor_class;
2082 cod[1] = hdev->major_class;
2083 cod[2] = get_service_classes(hdev);
2084
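	/* Setting 0x20 in the middle Class of Device byte corresponds to
	 * bit 13 of the 24-bit CoD, the Limited Discoverable Mode service
	 * class bit.
	 */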
2085 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2086 cod[1] |= 0x20;
2087
2088 if (memcmp(cod, hdev->dev_class, 3) == 0)
2089 return;
2090
2091 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2092 }
2093
2094 static void write_iac(struct hci_request *req)
2095 {
2096 struct hci_dev *hdev = req->hdev;
2097 struct hci_cp_write_current_iac_lap cp;
2098
2099 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2100 return;
2101
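	/* The LAP values below are the standard inquiry access codes stored
	 * least-significant byte first: LIAC = 0x9E8B00 and GIAC = 0x9E8B33.
	 * In limited discoverable mode both are written so the device answers
	 * limited as well as general inquiries.
	 */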
2102 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2103 /* Limited discoverable mode */
2104 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2105 cp.iac_lap[0] = 0x00; /* LIAC */
2106 cp.iac_lap[1] = 0x8b;
2107 cp.iac_lap[2] = 0x9e;
2108 cp.iac_lap[3] = 0x33; /* GIAC */
2109 cp.iac_lap[4] = 0x8b;
2110 cp.iac_lap[5] = 0x9e;
2111 } else {
2112 /* General discoverable mode */
2113 cp.num_iac = 1;
2114 cp.iac_lap[0] = 0x33; /* GIAC */
2115 cp.iac_lap[1] = 0x8b;
2116 cp.iac_lap[2] = 0x9e;
2117 }
2118
2119 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2120 (cp.num_iac * 3) + 1, &cp);
2121 }
2122
2123 static int discoverable_update(struct hci_request *req, unsigned long opt)
2124 {
2125 struct hci_dev *hdev = req->hdev;
2126
2127 hci_dev_lock(hdev);
2128
2129 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2130 write_iac(req);
2131 __hci_req_update_scan(req);
2132 __hci_req_update_class(req);
2133 }
2134
2135 /* Advertising instances don't use the global discoverable setting, so
2136 * only update AD if advertising was enabled using Set Advertising.
2137 */
2138 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2139 __hci_req_update_adv_data(req, 0x00);
2140
2141 /* Discoverable mode affects the local advertising
2142 * address in limited privacy mode.
2143 */
2144 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2145 if (ext_adv_capable(hdev))
2146 __hci_req_start_ext_adv(req, 0x00);
2147 else
2148 __hci_req_enable_advertising(req);
2149 }
2150 }
2151
2152 hci_dev_unlock(hdev);
2153
2154 return 0;
2155 }
2156
2157 static void discoverable_update_work(struct work_struct *work)
2158 {
2159 struct hci_dev *hdev = container_of(work, struct hci_dev,
2160 discoverable_update);
2161 u8 status;
2162
2163 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2164 mgmt_set_discoverable_complete(hdev, status);
2165 }
2166
2167 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2168 u8 reason)
2169 {
2170 switch (conn->state) {
2171 case BT_CONNECTED:
2172 case BT_CONFIG:
2173 if (conn->type == AMP_LINK) {
2174 struct hci_cp_disconn_phy_link cp;
2175
2176 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2177 cp.reason = reason;
2178 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2179 &cp);
2180 } else {
2181 struct hci_cp_disconnect dc;
2182
2183 dc.handle = cpu_to_le16(conn->handle);
2184 dc.reason = reason;
2185 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2186 }
2187
2188 conn->state = BT_DISCONN;
2189
2190 break;
2191 case BT_CONNECT:
2192 if (conn->type == LE_LINK) {
2193 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2194 break;
2195 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2196 0, NULL);
2197 } else if (conn->type == ACL_LINK) {
2198 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2199 break;
2200 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2201 6, &conn->dst);
2202 }
2203 break;
2204 case BT_CONNECT2:
2205 if (conn->type == ACL_LINK) {
2206 struct hci_cp_reject_conn_req rej;
2207
2208 bacpy(&rej.bdaddr, &conn->dst);
2209 rej.reason = reason;
2210
2211 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2212 sizeof(rej), &rej);
2213 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2214 struct hci_cp_reject_sync_conn_req rej;
2215
2216 bacpy(&rej.bdaddr, &conn->dst);
2217
2218 /* SCO rejection has its own limited set of
2219 * allowed error values (0x0D-0x0F) which isn't
2220 * compatible with most values passed to this
2221 			 * function. To be safe, hard-code one of the
2222 * values that's suitable for SCO.
2223 */
2224 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2225
2226 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2227 sizeof(rej), &rej);
2228 }
2229 break;
2230 default:
2231 conn->state = BT_CLOSED;
2232 break;
2233 }
2234 }
2235
2236 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2237 {
2238 if (status)
2239 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2240 }
2241
2242 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2243 {
2244 struct hci_request req;
2245 int err;
2246
2247 hci_req_init(&req, conn->hdev);
2248
2249 __hci_abort_conn(&req, conn, reason);
2250
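	/* An empty request is not treated as a failure below: depending on
	 * the connection state, __hci_abort_conn() may not need to queue any
	 * HCI command at all.
	 */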
2251 err = hci_req_run(&req, abort_conn_complete);
2252 if (err && err != -ENODATA) {
2253 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2254 return err;
2255 }
2256
2257 return 0;
2258 }
2259
2260 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2261 {
2262 hci_dev_lock(req->hdev);
2263 __hci_update_background_scan(req);
2264 hci_dev_unlock(req->hdev);
2265 return 0;
2266 }
2267
2268 static void bg_scan_update(struct work_struct *work)
2269 {
2270 struct hci_dev *hdev = container_of(work, struct hci_dev,
2271 bg_scan_update);
2272 struct hci_conn *conn;
2273 u8 status;
2274 int err;
2275
2276 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2277 if (!err)
2278 return;
2279
2280 hci_dev_lock(hdev);
2281
2282 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2283 if (conn)
2284 hci_le_conn_failed(conn, status);
2285
2286 hci_dev_unlock(hdev);
2287 }
2288
2289 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2290 {
2291 hci_req_add_le_scan_disable(req);
2292 return 0;
2293 }
2294
2295 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2296 {
2297 u8 length = opt;
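	/* length (taken from opt) is the HCI Inquiry_Length parameter,
	 * expressed in units of 1.28 seconds.
	 */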
2298 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2299 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2300 struct hci_cp_inquiry cp;
2301
2302 BT_DBG("%s", req->hdev->name);
2303
2304 hci_dev_lock(req->hdev);
2305 hci_inquiry_cache_flush(req->hdev);
2306 hci_dev_unlock(req->hdev);
2307
2308 memset(&cp, 0, sizeof(cp));
2309
2310 if (req->hdev->discovery.limited)
2311 memcpy(&cp.lap, liac, sizeof(cp.lap));
2312 else
2313 memcpy(&cp.lap, giac, sizeof(cp.lap));
2314
2315 cp.length = length;
2316
2317 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2318
2319 return 0;
2320 }
2321
2322 static void le_scan_disable_work(struct work_struct *work)
2323 {
2324 struct hci_dev *hdev = container_of(work, struct hci_dev,
2325 le_scan_disable.work);
2326 u8 status;
2327
2328 BT_DBG("%s", hdev->name);
2329
2330 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2331 return;
2332
2333 cancel_delayed_work(&hdev->le_scan_restart);
2334
2335 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2336 if (status) {
2337 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2338 status);
2339 return;
2340 }
2341
2342 hdev->discovery.scan_start = 0;
2343
2344 	/* If we were running an LE-only scan, change the discovery state.
2345 	 * If we were running both LE and BR/EDR inquiry simultaneously,
2346 	 * and BR/EDR inquiry has already finished, stop discovery;
2347 	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2348 	 * If we are still resolving a remote device name, do not change
2349 	 * the discovery state.
2350 */
2351
2352 if (hdev->discovery.type == DISCOV_TYPE_LE)
2353 goto discov_stopped;
2354
2355 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2356 return;
2357
2358 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2359 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2360 hdev->discovery.state != DISCOVERY_RESOLVING)
2361 goto discov_stopped;
2362
2363 return;
2364 }
2365
2366 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2367 HCI_CMD_TIMEOUT, &status);
2368 if (status) {
2369 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2370 goto discov_stopped;
2371 }
2372
2373 return;
2374
2375 discov_stopped:
2376 hci_dev_lock(hdev);
2377 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2378 hci_dev_unlock(hdev);
2379 }
2380
2381 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2382 {
2383 struct hci_dev *hdev = req->hdev;
2384
2385 	/* If the controller is not scanning, we are done. */
2386 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2387 return 0;
2388
2389 hci_req_add_le_scan_disable(req);
2390
2391 if (use_ext_scan(hdev)) {
2392 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2393
2394 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2395 ext_enable_cp.enable = LE_SCAN_ENABLE;
2396 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2397
2398 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2399 sizeof(ext_enable_cp), &ext_enable_cp);
2400 } else {
2401 struct hci_cp_le_set_scan_enable cp;
2402
2403 memset(&cp, 0, sizeof(cp));
2404 cp.enable = LE_SCAN_ENABLE;
2405 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2406 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2407 }
2408
2409 return 0;
2410 }
2411
2412 static void le_scan_restart_work(struct work_struct *work)
2413 {
2414 struct hci_dev *hdev = container_of(work, struct hci_dev,
2415 le_scan_restart.work);
2416 unsigned long timeout, duration, scan_start, now;
2417 u8 status;
2418
2419 BT_DBG("%s", hdev->name);
2420
2421 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2422 if (status) {
2423 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2424 status);
2425 return;
2426 }
2427
2428 hci_dev_lock(hdev);
2429
2430 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2431 !hdev->discovery.scan_start)
2432 goto unlock;
2433
2434 	/* When the scan was started, hdev->le_scan_disable was queued to
2435 	 * run after the scan duration counted from scan_start. During the
2436 	 * scan restart that work was canceled, so it must be queued again
2437 	 * with the proper remaining timeout so the scan does not run indefinitely.
2438 */
2439 duration = hdev->discovery.scan_duration;
2440 scan_start = hdev->discovery.scan_start;
2441 now = jiffies;
2442 if (now - scan_start <= duration) {
2443 int elapsed;
2444
2445 if (now >= scan_start)
2446 elapsed = now - scan_start;
2447 else
2448 elapsed = ULONG_MAX - scan_start + now;
2449
2450 timeout = duration - elapsed;
2451 } else {
2452 timeout = 0;
2453 }
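	/* timeout is the remaining part of the original scan duration: the
	 * jiffies elapsed since scan_start (computed across a jiffies
	 * wrap-around when needed) subtracted from duration, or zero when the
	 * full duration has already passed.
	 */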
2454
2455 queue_delayed_work(hdev->req_workqueue,
2456 &hdev->le_scan_disable, timeout);
2457
2458 unlock:
2459 hci_dev_unlock(hdev);
2460 }
2461
2462 static int active_scan(struct hci_request *req, unsigned long opt)
2463 {
2464 uint16_t interval = opt;
2465 struct hci_dev *hdev = req->hdev;
2466 u8 own_addr_type;
2467 int err;
2468
2469 BT_DBG("%s", hdev->name);
2470
2471 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2472 hci_dev_lock(hdev);
2473
2474 /* Don't let discovery abort an outgoing connection attempt
2475 * that's using directed advertising.
2476 */
2477 if (hci_lookup_le_connect(hdev)) {
2478 hci_dev_unlock(hdev);
2479 return -EBUSY;
2480 }
2481
2482 cancel_adv_timeout(hdev);
2483 hci_dev_unlock(hdev);
2484
2485 __hci_req_disable_advertising(req);
2486 }
2487
2488 	/* If the controller is scanning, it means background scanning is
2489 * running. Thus, we should temporarily stop it in order to set the
2490 * discovery scanning parameters.
2491 */
2492 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2493 hci_req_add_le_scan_disable(req);
2494
2495 /* All active scans will be done with either a resolvable private
2496 	 * address (when the privacy feature has been enabled) or a
2497 	 * non-resolvable private address.
2498 */
2499 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2500 &own_addr_type);
2501 if (err < 0)
2502 own_addr_type = ADDR_LE_DEV_PUBLIC;
2503
2504 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2505 own_addr_type, 0);
2506 return 0;
2507 }
2508
2509 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2510 {
2511 int err;
2512
2513 BT_DBG("%s", req->hdev->name);
2514
2515 err = active_scan(req, opt);
2516 if (err)
2517 return err;
2518
2519 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2520 }
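/* With HCI_QUIRK_SIMULTANEOUS_DISCOVERY the LE scan commands and the BR/EDR
 * inquiry end up in a single request here, letting the controller run both
 * procedures at the same time; start_discovery() below falls back to
 * sequential interleaving when the quirk is not set.
 */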
2521
2522 static void start_discovery(struct hci_dev *hdev, u8 *status)
2523 {
2524 unsigned long timeout;
2525
2526 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2527
2528 switch (hdev->discovery.type) {
2529 case DISCOV_TYPE_BREDR:
2530 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2531 hci_req_sync(hdev, bredr_inquiry,
2532 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2533 status);
2534 return;
2535 case DISCOV_TYPE_INTERLEAVED:
2536 /* When running simultaneous discovery, the LE scanning time
2537 		 * should occupy the whole discovery time since BR/EDR inquiry
2538 * and LE scanning are scheduled by the controller.
2539 *
2540 		 * For interleaved discovery, in comparison, BR/EDR inquiry
2541 * and LE scanning are done sequentially with separate
2542 * timeouts.
2543 */
2544 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2545 &hdev->quirks)) {
2546 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2547 			/* During simultaneous discovery, we double the LE scan
2548 * interval. We must leave some time for the controller
2549 * to do BR/EDR inquiry.
2550 */
2551 hci_req_sync(hdev, interleaved_discov,
2552 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2553 status);
2554 break;
2555 }
2556
2557 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2558 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2559 HCI_CMD_TIMEOUT, status);
2560 break;
2561 case DISCOV_TYPE_LE:
2562 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2563 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2564 HCI_CMD_TIMEOUT, status);
2565 break;
2566 default:
2567 *status = HCI_ERROR_UNSPECIFIED;
2568 return;
2569 }
2570
2571 if (*status)
2572 return;
2573
2574 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2575
2576 /* When service discovery is used and the controller has a
2577 * strict duplicate filter, it is important to remember the
2578 * start and duration of the scan. This is required for
2579 * restarting scanning during the discovery phase.
2580 */
2581 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2582 hdev->discovery.result_filtering) {
2583 hdev->discovery.scan_start = jiffies;
2584 hdev->discovery.scan_duration = timeout;
2585 }
2586
2587 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2588 timeout);
2589 }
2590
2591 bool hci_req_stop_discovery(struct hci_request *req)
2592 {
2593 struct hci_dev *hdev = req->hdev;
2594 struct discovery_state *d = &hdev->discovery;
2595 struct hci_cp_remote_name_req_cancel cp;
2596 struct inquiry_entry *e;
2597 bool ret = false;
2598
2599 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2600
2601 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2602 if (test_bit(HCI_INQUIRY, &hdev->flags))
2603 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2604
2605 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2606 cancel_delayed_work(&hdev->le_scan_disable);
2607 hci_req_add_le_scan_disable(req);
2608 }
2609
2610 ret = true;
2611 } else {
2612 /* Passive scanning */
2613 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2614 hci_req_add_le_scan_disable(req);
2615 ret = true;
2616 }
2617 }
2618
2619 /* No further actions needed for LE-only discovery */
2620 if (d->type == DISCOV_TYPE_LE)
2621 return ret;
2622
2623 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2624 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2625 NAME_PENDING);
2626 if (!e)
2627 return ret;
2628
2629 bacpy(&cp.bdaddr, &e->data.bdaddr);
2630 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2631 &cp);
2632 ret = true;
2633 }
2634
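	/* The return value tells the caller whether any HCI command was
	 * queued, i.e. whether there is anything to wait for before the
	 * discovery state can be considered stopped.
	 */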
2635 return ret;
2636 }
2637
2638 static int stop_discovery(struct hci_request *req, unsigned long opt)
2639 {
2640 hci_dev_lock(req->hdev);
2641 hci_req_stop_discovery(req);
2642 hci_dev_unlock(req->hdev);
2643
2644 return 0;
2645 }
2646
2647 static void discov_update(struct work_struct *work)
2648 {
2649 struct hci_dev *hdev = container_of(work, struct hci_dev,
2650 discov_update);
2651 u8 status = 0;
2652
2653 switch (hdev->discovery.state) {
2654 case DISCOVERY_STARTING:
2655 start_discovery(hdev, &status);
2656 mgmt_start_discovery_complete(hdev, status);
2657 if (status)
2658 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2659 else
2660 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2661 break;
2662 case DISCOVERY_STOPPING:
2663 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2664 mgmt_stop_discovery_complete(hdev, status);
2665 if (!status)
2666 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2667 break;
2668 case DISCOVERY_STOPPED:
2669 default:
2670 return;
2671 }
2672 }
2673
2674 static void discov_off(struct work_struct *work)
2675 {
2676 struct hci_dev *hdev = container_of(work, struct hci_dev,
2677 discov_off.work);
2678
2679 BT_DBG("%s", hdev->name);
2680
2681 hci_dev_lock(hdev);
2682
2683 	/* When the discoverable timeout triggers, just make sure
2684 * the limited discoverable flag is cleared. Even in the case
2685 * of a timeout triggered from general discoverable, it is
2686 * safe to unconditionally clear the flag.
2687 */
2688 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2689 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2690 hdev->discov_timeout = 0;
2691
2692 hci_dev_unlock(hdev);
2693
2694 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2695 mgmt_new_settings(hdev);
2696 }
2697
2698 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2699 {
2700 struct hci_dev *hdev = req->hdev;
2701 u8 link_sec;
2702
2703 hci_dev_lock(hdev);
2704
2705 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2706 !lmp_host_ssp_capable(hdev)) {
2707 u8 mode = 0x01;
2708
2709 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2710
2711 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2712 u8 support = 0x01;
2713
2714 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2715 sizeof(support), &support);
2716 }
2717 }
2718
2719 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2720 lmp_bredr_capable(hdev)) {
2721 struct hci_cp_write_le_host_supported cp;
2722
2723 cp.le = 0x01;
2724 cp.simul = 0x00;
2725
2726 /* Check first if we already have the right
2727 * host state (host features set)
2728 */
2729 if (cp.le != lmp_host_le_capable(hdev) ||
2730 cp.simul != lmp_host_le_br_capable(hdev))
2731 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2732 sizeof(cp), &cp);
2733 }
2734
2735 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2736 /* Make sure the controller has a good default for
2737 * advertising data. This also applies to the case
2738 * where BR/EDR was toggled during the AUTO_OFF phase.
2739 */
2740 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2741 list_empty(&hdev->adv_instances)) {
2742 int err;
2743
2744 if (ext_adv_capable(hdev)) {
2745 err = __hci_req_setup_ext_adv_instance(req,
2746 0x00);
2747 if (!err)
2748 __hci_req_update_scan_rsp_data(req,
2749 0x00);
2750 } else {
2751 err = 0;
2752 __hci_req_update_adv_data(req, 0x00);
2753 __hci_req_update_scan_rsp_data(req, 0x00);
2754 }
2755
2756 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2757 if (!ext_adv_capable(hdev))
2758 __hci_req_enable_advertising(req);
2759 else if (!err)
2760 __hci_req_enable_ext_advertising(req);
2761 }
2762 } else if (!list_empty(&hdev->adv_instances)) {
2763 struct adv_info *adv_instance;
2764
2765 adv_instance = list_first_entry(&hdev->adv_instances,
2766 struct adv_info, list);
2767 __hci_req_schedule_adv_instance(req,
2768 adv_instance->instance,
2769 true);
2770 }
2771 }
2772
2773 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2774 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2775 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2776 sizeof(link_sec), &link_sec);
2777
2778 if (lmp_bredr_capable(hdev)) {
2779 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2780 __hci_req_write_fast_connectable(req, true);
2781 else
2782 __hci_req_write_fast_connectable(req, false);
2783 __hci_req_update_scan(req);
2784 __hci_req_update_class(req);
2785 __hci_req_update_name(req);
2786 __hci_req_update_eir(req);
2787 }
2788
2789 hci_dev_unlock(hdev);
2790 return 0;
2791 }
2792
2793 int __hci_req_hci_power_on(struct hci_dev *hdev)
2794 {
2795 /* Register the available SMP channels (BR/EDR and LE) only when
2796 * successfully powering on the controller. This late
2797 * registration is required so that LE SMP can clearly decide if
2798 * the public address or static address is used.
2799 */
2800 smp_register(hdev);
2801
2802 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2803 NULL);
2804 }
2805
2806 void hci_request_setup(struct hci_dev *hdev)
2807 {
2808 INIT_WORK(&hdev->discov_update, discov_update);
2809 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2810 INIT_WORK(&hdev->scan_update, scan_update_work);
2811 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2812 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2813 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2814 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2815 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2816 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2817 }
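/* Every work item initialized above has a matching cancellation in
 * hci_request_cancel_all() below; the advertising-instance timer is only
 * cancelled there when it is actually pending.
 */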
2818
2819 void hci_request_cancel_all(struct hci_dev *hdev)
2820 {
2821 hci_req_sync_cancel(hdev, ENODEV);
2822
2823 cancel_work_sync(&hdev->discov_update);
2824 cancel_work_sync(&hdev->bg_scan_update);
2825 cancel_work_sync(&hdev->scan_update);
2826 cancel_work_sync(&hdev->connectable_update);
2827 cancel_work_sync(&hdev->discoverable_update);
2828 cancel_delayed_work_sync(&hdev->discov_off);
2829 cancel_delayed_work_sync(&hdev->le_scan_disable);
2830 cancel_delayed_work_sync(&hdev->le_scan_restart);
2831
2832 if (hdev->adv_instance_timeout) {
2833 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2834 hdev->adv_instance_timeout = 0;
2835 }
2836 }
2837