1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 
33 #define HCI_REQ_DONE	  0
34 #define HCI_REQ_PEND	  1
35 #define HCI_REQ_CANCELED  2
36 
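/* Initialize a request: empty command queue, bound to the given hdev, no error set */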
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 	skb_queue_head_init(&req->cmd_q);
40 	req->hdev = hdev;
41 	req->err = 0;
42 }
43 
44 void hci_req_purge(struct hci_request *req)
45 {
46 	skb_queue_purge(&req->cmd_q);
47 }
48 
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 	return hdev->req_status == HCI_REQ_PEND;
52 }
53 
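/* Run a built request: tag the last queued command with the completion
 * callback, splice all commands onto the controller's command queue and
 * kick the command work.
 */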
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 		   hci_req_complete_skb_t complete_skb)
56 {
57 	struct hci_dev *hdev = req->hdev;
58 	struct sk_buff *skb;
59 	unsigned long flags;
60 
61 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62 
63 	/* If an error occurred during request building, remove all HCI
64 	 * commands queued on the HCI request queue.
65 	 */
66 	if (req->err) {
67 		skb_queue_purge(&req->cmd_q);
68 		return req->err;
69 	}
70 
71 	/* Do not allow empty requests */
72 	if (skb_queue_empty(&req->cmd_q))
73 		return -ENODATA;
74 
75 	skb = skb_peek_tail(&req->cmd_q);
76 	if (complete) {
77 		bt_cb(skb)->hci.req_complete = complete;
78 	} else if (complete_skb) {
79 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 	}
82 
83 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86 
87 	queue_work(hdev->workqueue, &hdev->cmd_work);
88 
89 	return 0;
90 }
91 
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 	return req_run(req, complete, NULL);
95 }
96 
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 	return req_run(req, NULL, complete);
100 }
101 
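/* Completion handler for synchronous requests: record the result, keep a
 * reference to the response skb and wake up the waiting thread.
 */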
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 				  struct sk_buff *skb)
104 {
105 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		if (skb)
111 			hdev->req_skb = skb_get(skb);
112 		wake_up_interruptible(&hdev->req_wait_q);
113 	}
114 }
115 
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
119 
120 	if (hdev->req_status == HCI_REQ_PEND) {
121 		hdev->req_result = err;
122 		hdev->req_status = HCI_REQ_CANCELED;
123 		wake_up_interruptible(&hdev->req_wait_q);
124 	}
125 }
126 
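/* Send a single HCI command synchronously and wait up to timeout for the
 * result. Returns the response skb on success or an ERR_PTR on failure.
 */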
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 				  const void *param, u8 event, u32 timeout)
129 {
130 	struct hci_request req;
131 	struct sk_buff *skb;
132 	int err = 0;
133 
134 	BT_DBG("%s", hdev->name);
135 
136 	hci_req_init(&req, hdev);
137 
138 	hci_req_add_ev(&req, opcode, plen, param, event);
139 
140 	hdev->req_status = HCI_REQ_PEND;
141 
142 	err = hci_req_run_skb(&req, hci_req_sync_complete);
143 	if (err < 0)
144 		return ERR_PTR(err);
145 
146 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 			hdev->req_status != HCI_REQ_PEND, timeout);
148 
149 	if (err == -ERESTARTSYS)
150 		return ERR_PTR(-EINTR);
151 
152 	switch (hdev->req_status) {
153 	case HCI_REQ_DONE:
154 		err = -bt_to_errno(hdev->req_result);
155 		break;
156 
157 	case HCI_REQ_CANCELED:
158 		err = -hdev->req_result;
159 		break;
160 
161 	default:
162 		err = -ETIMEDOUT;
163 		break;
164 	}
165 
166 	hdev->req_status = hdev->req_result = 0;
167 	skb = hdev->req_skb;
168 	hdev->req_skb = NULL;
169 
170 	BT_DBG("%s end: err %d", hdev->name, err);
171 
172 	if (err < 0) {
173 		kfree_skb(skb);
174 		return ERR_PTR(err);
175 	}
176 
177 	if (!skb)
178 		return ERR_PTR(-ENODATA);
179 
180 	return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183 
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 			       const void *param, u32 timeout)
186 {
187 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
190 
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 						     unsigned long opt),
194 		   unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 	struct hci_request req;
197 	int err = 0;
198 
199 	BT_DBG("%s start", hdev->name);
200 
201 	hci_req_init(&req, hdev);
202 
203 	hdev->req_status = HCI_REQ_PEND;
204 
205 	err = func(&req, opt);
206 	if (err) {
207 		if (hci_status)
208 			*hci_status = HCI_ERROR_UNSPECIFIED;
209 		return err;
210 	}
211 
212 	err = hci_req_run_skb(&req, hci_req_sync_complete);
213 	if (err < 0) {
214 		hdev->req_status = 0;
215 
216 		/* ENODATA means the HCI request command queue is empty.
217 		 * This can happen when a request with conditionals doesn't
218 		 * trigger any commands to be sent. This is normal behavior
219 		 * and should not trigger an error return.
220 		 */
221 		if (err == -ENODATA) {
222 			if (hci_status)
223 				*hci_status = 0;
224 			return 0;
225 		}
226 
227 		if (hci_status)
228 			*hci_status = HCI_ERROR_UNSPECIFIED;
229 
230 		return err;
231 	}
232 
233 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 			hdev->req_status != HCI_REQ_PEND, timeout);
235 
236 	if (err == -ERESTARTSYS)
237 		return -EINTR;
238 
239 	switch (hdev->req_status) {
240 	case HCI_REQ_DONE:
241 		err = -bt_to_errno(hdev->req_result);
242 		if (hci_status)
243 			*hci_status = hdev->req_result;
244 		break;
245 
246 	case HCI_REQ_CANCELED:
247 		err = -hdev->req_result;
248 		if (hci_status)
249 			*hci_status = HCI_ERROR_UNSPECIFIED;
250 		break;
251 
252 	default:
253 		err = -ETIMEDOUT;
254 		if (hci_status)
255 			*hci_status = HCI_ERROR_UNSPECIFIED;
256 		break;
257 	}
258 
259 	kfree_skb(hdev->req_skb);
260 	hdev->req_skb = NULL;
261 	hdev->req_status = hdev->req_result = 0;
262 
263 	BT_DBG("%s end: err %d", hdev->name, err);
264 
265 	return err;
266 }
267 
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 						  unsigned long opt),
270 		 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 	int ret;
273 
274 	/* Serialize all requests */
275 	hci_req_sync_lock(hdev);
276 	/* check the state after obtaining the lock to protect the HCI_UP
277 	 * against any races from hci_dev_do_close when the controller
278 	 * gets removed.
279 	 */
280 	if (test_bit(HCI_UP, &hdev->flags))
281 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
282 	else
283 		ret = -ENETDOWN;
284 	hci_req_sync_unlock(hdev);
285 
286 	return ret;
287 }
288 
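/* Allocate an skb and fill in the HCI command header and parameters */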
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 				const void *param)
291 {
292 	int len = HCI_COMMAND_HDR_SIZE + plen;
293 	struct hci_command_hdr *hdr;
294 	struct sk_buff *skb;
295 
296 	skb = bt_skb_alloc(len, GFP_ATOMIC);
297 	if (!skb)
298 		return NULL;
299 
300 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 	hdr->opcode = cpu_to_le16(opcode);
302 	hdr->plen   = plen;
303 
304 	if (plen)
305 		skb_put_data(skb, param, plen);
306 
307 	BT_DBG("skb len %d", skb->len);
308 
309 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 	hci_skb_opcode(skb) = opcode;
311 
312 	return skb;
313 }
314 
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 		    const void *param, u8 event)
318 {
319 	struct hci_dev *hdev = req->hdev;
320 	struct sk_buff *skb;
321 
322 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323 
324 	/* If an error occurred during request building, there is no point in
325 	 * queueing the HCI command. We can simply return.
326 	 */
327 	if (req->err)
328 		return;
329 
330 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 	if (!skb) {
332 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
333 			   opcode);
334 		req->err = -ENOMEM;
335 		return;
336 	}
337 
338 	if (skb_queue_empty(&req->cmd_q))
339 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340 
341 	bt_cb(skb)->hci.req_event = event;
342 
343 	skb_queue_tail(&req->cmd_q, skb);
344 }
345 
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 		 const void *param)
348 {
349 	hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351 
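/* Queue page scan activity/type changes: interlaced scanning with a 160 ms
 * interval for fast connectable mode, standard scanning with the default
 * 1.28 s interval otherwise. Commands are only queued when the values change.
 */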
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354 	struct hci_dev *hdev = req->hdev;
355 	struct hci_cp_write_page_scan_activity acp;
356 	u8 type;
357 
358 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 		return;
360 
361 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 		return;
363 
364 	if (enable) {
365 		type = PAGE_SCAN_TYPE_INTERLACED;
366 
367 		/* 160 msec page scan interval */
368 		acp.interval = cpu_to_le16(0x0100);
369 	} else {
370 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
371 
372 		/* default 1.28 sec page scan */
373 		acp.interval = cpu_to_le16(0x0800);
374 	}
375 
376 	acp.window = cpu_to_le16(0x0012);
377 
378 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 			    sizeof(acp), &acp);
382 
383 	if (hdev->page_scan_type != type)
384 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385 }
386 
387 /* This function controls the background scanning based on hdev->pend_le_conns
388  * list. If there are pending LE connections we start the background scanning,
389  * otherwise we stop it.
390  *
391  * This function requires the caller holds hdev->lock.
392  */
393 static void __hci_update_background_scan(struct hci_request *req)
394 {
395 	struct hci_dev *hdev = req->hdev;
396 
397 	if (!test_bit(HCI_UP, &hdev->flags) ||
398 	    test_bit(HCI_INIT, &hdev->flags) ||
399 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
400 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
403 		return;
404 
405 	/* No point in doing scanning if LE support hasn't been enabled */
406 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
407 		return;
408 
409 	/* If discovery is active don't interfere with it */
410 	if (hdev->discovery.state != DISCOVERY_STOPPED)
411 		return;
412 
413 	/* Reset RSSI and UUID filters when starting background scanning
414 	 * since these filters are meant for service discovery only.
415 	 *
416 	 * The Start Discovery and Start Service Discovery operations
417 	 * ensure to set proper values for RSSI threshold and UUID
418 	 * filter list. So it is safe to just reset them here.
419 	 */
420 	hci_discovery_filter_clear(hdev);
421 
422 	if (list_empty(&hdev->pend_le_conns) &&
423 	    list_empty(&hdev->pend_le_reports)) {
424 		/* If there are no pending LE connections or devices
425 		 * to be scanned for, we should stop the background
426 		 * scanning.
427 		 */
428 
429 		/* If controller is not scanning we are done. */
430 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431 			return;
432 
433 		hci_req_add_le_scan_disable(req);
434 
435 		BT_DBG("%s stopping background scanning", hdev->name);
436 	} else {
437 		/* If there is at least one pending LE connection, we should
438 		 * keep the background scan running.
439 		 */
440 
441 		/* If controller is connecting, we should not start scanning
442 		 * since some controllers are not able to scan and connect at
443 		 * the same time.
444 		 */
445 		if (hci_lookup_le_connect(hdev))
446 			return;
447 
448 		/* If controller is currently scanning, we stop it to ensure we
449 		 * don't miss any advertising (due to duplicates filter).
450 		 */
451 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 			hci_req_add_le_scan_disable(req);
453 
454 		hci_req_add_le_passive_scan(req);
455 
456 		BT_DBG("%s starting background scanning", hdev->name);
457 	}
458 }
459 
460 void __hci_req_update_name(struct hci_request *req)
461 {
462 	struct hci_dev *hdev = req->hdev;
463 	struct hci_cp_write_local_name cp;
464 
465 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466 
467 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468 }
469 
470 #define PNP_INFO_SVCLASS_ID		0x1200
471 
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473 {
474 	u8 *ptr = data, *uuids_start = NULL;
475 	struct bt_uuid *uuid;
476 
477 	if (len < 4)
478 		return ptr;
479 
480 	list_for_each_entry(uuid, &hdev->uuids, list) {
481 		u16 uuid16;
482 
483 		if (uuid->size != 16)
484 			continue;
485 
486 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
487 		if (uuid16 < 0x1100)
488 			continue;
489 
490 		if (uuid16 == PNP_INFO_SVCLASS_ID)
491 			continue;
492 
493 		if (!uuids_start) {
494 			uuids_start = ptr;
495 			uuids_start[0] = 1;
496 			uuids_start[1] = EIR_UUID16_ALL;
497 			ptr += 2;
498 		}
499 
500 		/* Stop if not enough space to put next UUID */
501 		if ((ptr - data) + sizeof(u16) > len) {
502 			uuids_start[1] = EIR_UUID16_SOME;
503 			break;
504 		}
505 
506 		*ptr++ = (uuid16 & 0x00ff);
507 		*ptr++ = (uuid16 & 0xff00) >> 8;
508 		uuids_start[0] += sizeof(uuid16);
509 	}
510 
511 	return ptr;
512 }
513 
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 {
516 	u8 *ptr = data, *uuids_start = NULL;
517 	struct bt_uuid *uuid;
518 
519 	if (len < 6)
520 		return ptr;
521 
522 	list_for_each_entry(uuid, &hdev->uuids, list) {
523 		if (uuid->size != 32)
524 			continue;
525 
526 		if (!uuids_start) {
527 			uuids_start = ptr;
528 			uuids_start[0] = 1;
529 			uuids_start[1] = EIR_UUID32_ALL;
530 			ptr += 2;
531 		}
532 
533 		/* Stop if not enough space to put next UUID */
534 		if ((ptr - data) + sizeof(u32) > len) {
535 			uuids_start[1] = EIR_UUID32_SOME;
536 			break;
537 		}
538 
539 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
540 		ptr += sizeof(u32);
541 		uuids_start[0] += sizeof(u32);
542 	}
543 
544 	return ptr;
545 }
546 
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
548 {
549 	u8 *ptr = data, *uuids_start = NULL;
550 	struct bt_uuid *uuid;
551 
552 	if (len < 18)
553 		return ptr;
554 
555 	list_for_each_entry(uuid, &hdev->uuids, list) {
556 		if (uuid->size != 128)
557 			continue;
558 
559 		if (!uuids_start) {
560 			uuids_start = ptr;
561 			uuids_start[0] = 1;
562 			uuids_start[1] = EIR_UUID128_ALL;
563 			ptr += 2;
564 		}
565 
566 		/* Stop if not enough space to put next UUID */
567 		if ((ptr - data) + 16 > len) {
568 			uuids_start[1] = EIR_UUID128_SOME;
569 			break;
570 		}
571 
572 		memcpy(ptr, uuid->uuid, 16);
573 		ptr += 16;
574 		uuids_start[0] += 16;
575 	}
576 
577 	return ptr;
578 }
579 
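/* Build the extended inquiry response data: local name, TX power,
 * device ID and the 16/32/128-bit service UUID lists.
 */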
580 static void create_eir(struct hci_dev *hdev, u8 *data)
581 {
582 	u8 *ptr = data;
583 	size_t name_len;
584 
585 	name_len = strlen(hdev->dev_name);
586 
587 	if (name_len > 0) {
588 		/* EIR Data type */
589 		if (name_len > 48) {
590 			name_len = 48;
591 			ptr[1] = EIR_NAME_SHORT;
592 		} else
593 			ptr[1] = EIR_NAME_COMPLETE;
594 
595 		/* EIR Data length */
596 		ptr[0] = name_len + 1;
597 
598 		memcpy(ptr + 2, hdev->dev_name, name_len);
599 
600 		ptr += (name_len + 2);
601 	}
602 
603 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
604 		ptr[0] = 2;
605 		ptr[1] = EIR_TX_POWER;
606 		ptr[2] = (u8) hdev->inq_tx_power;
607 
608 		ptr += 3;
609 	}
610 
611 	if (hdev->devid_source > 0) {
612 		ptr[0] = 9;
613 		ptr[1] = EIR_DEVICE_ID;
614 
615 		put_unaligned_le16(hdev->devid_source, ptr + 2);
616 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 		put_unaligned_le16(hdev->devid_product, ptr + 6);
618 		put_unaligned_le16(hdev->devid_version, ptr + 8);
619 
620 		ptr += 10;
621 	}
622 
623 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 }
627 
628 void __hci_req_update_eir(struct hci_request *req)
629 {
630 	struct hci_dev *hdev = req->hdev;
631 	struct hci_cp_write_eir cp;
632 
633 	if (!hdev_is_powered(hdev))
634 		return;
635 
636 	if (!lmp_ext_inq_capable(hdev))
637 		return;
638 
639 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 		return;
641 
642 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 		return;
644 
645 	memset(&cp, 0, sizeof(cp));
646 
647 	create_eir(hdev, cp.data);
648 
649 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 		return;
651 
652 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
653 
654 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655 }
656 
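/* Queue the scan disable command, using the extended variant when the
 * controller supports extended scanning.
 */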
657 void hci_req_add_le_scan_disable(struct hci_request *req)
658 {
659 	struct hci_dev *hdev = req->hdev;
660 
661 	if (use_ext_scan(hdev)) {
662 		struct hci_cp_le_set_ext_scan_enable cp;
663 
664 		memset(&cp, 0, sizeof(cp));
665 		cp.enable = LE_SCAN_DISABLE;
666 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
667 			    &cp);
668 	} else {
669 		struct hci_cp_le_set_scan_enable cp;
670 
671 		memset(&cp, 0, sizeof(cp));
672 		cp.enable = LE_SCAN_DISABLE;
673 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
674 	}
675 }
676 
677 static void add_to_white_list(struct hci_request *req,
678 			      struct hci_conn_params *params)
679 {
680 	struct hci_cp_le_add_to_white_list cp;
681 
682 	cp.bdaddr_type = params->addr_type;
683 	bacpy(&cp.bdaddr, &params->addr);
684 
685 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
686 }
687 
688 static u8 update_white_list(struct hci_request *req)
689 {
690 	struct hci_dev *hdev = req->hdev;
691 	struct hci_conn_params *params;
692 	struct bdaddr_list *b;
693 	uint8_t white_list_entries = 0;
694 
695 	/* Go through the current white list programmed into the
696 	 * controller one by one and check if that address is still
697 	 * in the list of pending connections or list of devices to
698 	 * report. If not present in either list, then queue the
699 	 * command to remove it from the controller.
700 	 */
701 	list_for_each_entry(b, &hdev->le_white_list, list) {
702 		/* If the device is neither in pend_le_conns nor
703 		 * pend_le_reports then remove it from the whitelist.
704 		 */
705 		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
706 					       &b->bdaddr, b->bdaddr_type) &&
707 		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
708 					       &b->bdaddr, b->bdaddr_type)) {
709 			struct hci_cp_le_del_from_white_list cp;
710 
711 			cp.bdaddr_type = b->bdaddr_type;
712 			bacpy(&cp.bdaddr, &b->bdaddr);
713 
714 			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
715 				    sizeof(cp), &cp);
716 			continue;
717 		}
718 
719 		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
720 			/* White list can not be used with RPAs */
721 			return 0x00;
722 		}
723 
724 		white_list_entries++;
725 	}
726 
727 	/* Since all no longer valid white list entries have been
728 	 * removed, walk through the list of pending connections
729 	 * and ensure that any new device gets programmed into
730 	 * the controller.
731 	 *
732 	 * If the list of the devices is larger than the list of
733 	 * available white list entries in the controller, then
734 	 * just abort and return the filter policy value to not use the
735 	 * white list.
736 	 */
737 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
738 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
739 					   &params->addr, params->addr_type))
740 			continue;
741 
742 		if (white_list_entries >= hdev->le_white_list_size) {
743 			/* Select filter policy to accept all advertising */
744 			return 0x00;
745 		}
746 
747 		if (hci_find_irk_by_addr(hdev, &params->addr,
748 					 params->addr_type)) {
749 			/* White list can not be used with RPAs */
750 			return 0x00;
751 		}
752 
753 		white_list_entries++;
754 		add_to_white_list(req, params);
755 	}
756 
757 	/* After adding all new pending connections, walk through
758 	 * the list of pending reports and also add these to the
759 	 * white list if there is still space.
760 	 */
761 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
762 		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
763 					   &params->addr, params->addr_type))
764 			continue;
765 
766 		if (white_list_entries >= hdev->le_white_list_size) {
767 			/* Select filter policy to accept all advertising */
768 			return 0x00;
769 		}
770 
771 		if (hci_find_irk_by_addr(hdev, &params->addr,
772 					 params->addr_type)) {
773 			/* White list can not be used with RPAs */
774 			return 0x00;
775 		}
776 
777 		white_list_entries++;
778 		add_to_white_list(req, params);
779 	}
780 
781 	/* Select filter policy to use white list */
782 	return 0x01;
783 }
784 
785 static bool scan_use_rpa(struct hci_dev *hdev)
786 {
787 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
788 }
789 
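/* Queue scan parameter and scan enable commands, using the extended
 * (per-PHY) scanning commands when the controller supports them.
 */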
790 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
791 			       u16 window, u8 own_addr_type, u8 filter_policy)
792 {
793 	struct hci_dev *hdev = req->hdev;
794 
795 	/* Use extended scanning if both the set ext scan param and the
796 	 * set ext scan enable commands are supported
797 	 */
798 	if (use_ext_scan(hdev)) {
799 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
800 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
801 		struct hci_cp_le_scan_phy_params *phy_params;
802 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
803 		u32 plen;
804 
805 		ext_param_cp = (void *)data;
806 		phy_params = (void *)ext_param_cp->data;
807 
808 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
809 		ext_param_cp->own_addr_type = own_addr_type;
810 		ext_param_cp->filter_policy = filter_policy;
811 
812 		plen = sizeof(*ext_param_cp);
813 
814 		if (scan_1m(hdev) || scan_2m(hdev)) {
815 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
816 
817 			memset(phy_params, 0, sizeof(*phy_params));
818 			phy_params->type = type;
819 			phy_params->interval = cpu_to_le16(interval);
820 			phy_params->window = cpu_to_le16(window);
821 
822 			plen += sizeof(*phy_params);
823 			phy_params++;
824 		}
825 
826 		if (scan_coded(hdev)) {
827 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
828 
829 			memset(phy_params, 0, sizeof(*phy_params));
830 			phy_params->type = type;
831 			phy_params->interval = cpu_to_le16(interval);
832 			phy_params->window = cpu_to_le16(window);
833 
834 			plen += sizeof(*phy_params);
835 			phy_params++;
836 		}
837 
838 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
839 			    plen, ext_param_cp);
840 
841 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
842 		ext_enable_cp.enable = LE_SCAN_ENABLE;
843 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
844 
845 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
846 			    sizeof(ext_enable_cp), &ext_enable_cp);
847 	} else {
848 		struct hci_cp_le_set_scan_param param_cp;
849 		struct hci_cp_le_set_scan_enable enable_cp;
850 
851 		memset(&param_cp, 0, sizeof(param_cp));
852 		param_cp.type = type;
853 		param_cp.interval = cpu_to_le16(interval);
854 		param_cp.window = cpu_to_le16(window);
855 		param_cp.own_address_type = own_addr_type;
856 		param_cp.filter_policy = filter_policy;
857 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
858 			    &param_cp);
859 
860 		memset(&enable_cp, 0, sizeof(enable_cp));
861 		enable_cp.enable = LE_SCAN_ENABLE;
862 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
863 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
864 			    &enable_cp);
865 	}
866 }
867 
868 void hci_req_add_le_passive_scan(struct hci_request *req)
869 {
870 	struct hci_dev *hdev = req->hdev;
871 	u8 own_addr_type;
872 	u8 filter_policy;
873 
874 	/* Set require_privacy to false since no SCAN_REQ are sent
875 	 * during passive scanning. Not using a non-resolvable address
876 	 * here is important so that peer devices using direct
877 	 * advertising with our address will be correctly reported
878 	 * by the controller.
879 	 */
880 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
881 				      &own_addr_type))
882 		return;
883 
884 	/* Adding or removing entries from the white list must
885 	 * happen before enabling scanning. The controller does
886 	 * not allow white list modification while scanning.
887 	 */
888 	filter_policy = update_white_list(req);
889 
890 	/* When the controller is using resolvable random addresses and
891 	 * LE privacy is therefore enabled, controllers that support
892 	 * Extended Scanner Filter Policies can additionally handle
893 	 * directed advertising.
894 	 *
895 	 * So instead of using filter policies 0x00 (no whitelist)
896 	 * and 0x01 (whitelist enabled) use the new filter policies
897 	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
898 	 */
899 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
900 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
901 		filter_policy |= 0x02;
902 
903 	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
904 			   hdev->le_scan_window, own_addr_type, filter_policy);
905 }
906 
907 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
908 {
909 	struct adv_info *adv_instance;
910 
911 	/* Ignore instance 0 */
912 	if (instance == 0x00)
913 		return 0;
914 
915 	adv_instance = hci_find_adv_instance(hdev, instance);
916 	if (!adv_instance)
917 		return 0;
918 
919 	/* TODO: Take into account the "appearance" and "local-name" flags here.
920 	 * These are currently being ignored as they are not supported.
921 	 */
922 	return adv_instance->scan_rsp_len;
923 }
924 
925 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
926 {
927 	u8 instance = hdev->cur_adv_instance;
928 	struct adv_info *adv_instance;
929 
930 	/* Ignore instance 0 */
931 	if (instance == 0x00)
932 		return 0;
933 
934 	adv_instance = hci_find_adv_instance(hdev, instance);
935 	if (!adv_instance)
936 		return 0;
937 
938 	/* TODO: Take into account the "appearance" and "local-name" flags here.
939 	 * These are currently being ignored as they are not supported.
940 	 */
941 	return adv_instance->scan_rsp_len;
942 }
943 
944 void __hci_req_disable_advertising(struct hci_request *req)
945 {
946 	if (ext_adv_capable(req->hdev)) {
947 		struct hci_cp_le_set_ext_adv_enable cp;
948 
949 		cp.enable = 0x00;
950 		/* Disable all sets since we only support one set at the moment */
951 		cp.num_of_sets = 0x00;
952 
953 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
954 	} else {
955 		u8 enable = 0x00;
956 
957 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
958 	}
959 }
960 
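/* Return the advertising flags for the given instance. Instance 0 derives
 * its flags from the connectable and discoverable device settings.
 */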
961 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
962 {
963 	u32 flags;
964 	struct adv_info *adv_instance;
965 
966 	if (instance == 0x00) {
967 		/* Instance 0 always manages the "Tx Power" and "Flags"
968 		 * fields
969 		 */
970 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
971 
972 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
973 		 * corresponds to the "connectable" instance flag.
974 		 */
975 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
976 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
977 
978 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
980 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
981 			flags |= MGMT_ADV_FLAG_DISCOV;
982 
983 		return flags;
984 	}
985 
986 	adv_instance = hci_find_adv_instance(hdev, instance);
987 
988 	/* Return 0 when we got an invalid instance identifier. */
989 	if (!adv_instance)
990 		return 0;
991 
992 	return adv_instance->flags;
993 }
994 
995 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
996 {
997 	/* If privacy is not enabled don't use RPA */
998 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
999 		return false;
1000 
1001 	/* If basic privacy mode is enabled use RPA */
1002 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1003 		return true;
1004 
1005 	/* If limited privacy mode is enabled don't use RPA if we're
1006 	 * both discoverable and bondable.
1007 	 */
1008 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1009 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1010 		return false;
1011 
1012 	/* We're neither bondable nor discoverable in the limited
1013 	 * privacy mode, therefore use RPA.
1014 	 */
1015 	return true;
1016 }
1017 
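/* Check the supported LE states to decide whether advertising of the
 * requested type is possible alongside the existing LE connections.
 */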
1018 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1019 {
1020 	/* If there is no connection we are OK to advertise. */
1021 	if (hci_conn_num(hdev, LE_LINK) == 0)
1022 		return true;
1023 
1024 	/* Check le_states if there is any connection in slave role. */
1025 	if (hdev->conn_hash.le_num_slave > 0) {
1026 		/* Slave connection state and non connectable mode bit 20. */
1027 		if (!connectable && !(hdev->le_states[2] & 0x10))
1028 			return false;
1029 
1030 		/* Slave connection state and connectable mode bit 38
1031 		 * and scannable bit 21.
1032 		 */
1033 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1034 				    !(hdev->le_states[2] & 0x20)))
1035 			return false;
1036 	}
1037 
1038 	/* Check le_states if there is any connection in master role. */
1039 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1040 		/* Master connection state and non connectable mode bit 18. */
1041 		if (!connectable && !(hdev->le_states[2] & 0x02))
1042 			return false;
1043 
1044 		/* Master connection state and connectable mode bit 35 and
1045 		 * scannable 19.
1046 		 */
1047 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1048 				    !(hdev->le_states[2] & 0x08)))
1049 			return false;
1050 	}
1051 
1052 	return true;
1053 }
1054 
1055 void __hci_req_enable_advertising(struct hci_request *req)
1056 {
1057 	struct hci_dev *hdev = req->hdev;
1058 	struct hci_cp_le_set_adv_param cp;
1059 	u8 own_addr_type, enable = 0x01;
1060 	bool connectable;
1061 	u16 adv_min_interval, adv_max_interval;
1062 	u32 flags;
1063 
1064 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1065 
1066 	/* If the "connectable" instance flag was not set, then choose between
1067 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1068 	 */
1069 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1070 		      mgmt_get_connectable(hdev);
1071 
1072 	if (!is_advertising_allowed(hdev, connectable))
1073 		return;
1074 
1075 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1076 		__hci_req_disable_advertising(req);
1077 
1078 	/* Clear the HCI_LE_ADV bit temporarily so that the
1079 	 * hci_update_random_address knows that it's safe to go ahead
1080 	 * and write a new random address. The flag will be set back on
1081 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1082 	 */
1083 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1084 
1085 	/* Set require_privacy to true only when non-connectable
1086 	 * advertising is used. In that case it is fine to use a
1087 	 * non-resolvable private address.
1088 	 */
1089 	if (hci_update_random_address(req, !connectable,
1090 				      adv_use_rpa(hdev, flags),
1091 				      &own_addr_type) < 0)
1092 		return;
1093 
1094 	memset(&cp, 0, sizeof(cp));
1095 
1096 	if (connectable) {
1097 		cp.type = LE_ADV_IND;
1098 
1099 		adv_min_interval = hdev->le_adv_min_interval;
1100 		adv_max_interval = hdev->le_adv_max_interval;
1101 	} else {
1102 		if (get_cur_adv_instance_scan_rsp_len(hdev))
1103 			cp.type = LE_ADV_SCAN_IND;
1104 		else
1105 			cp.type = LE_ADV_NONCONN_IND;
1106 
1107 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1108 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1109 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1110 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1111 		} else {
1112 			adv_min_interval = hdev->le_adv_min_interval;
1113 			adv_max_interval = hdev->le_adv_max_interval;
1114 		}
1115 	}
1116 
1117 	cp.min_interval = cpu_to_le16(adv_min_interval);
1118 	cp.max_interval = cpu_to_le16(adv_max_interval);
1119 	cp.own_address_type = own_addr_type;
1120 	cp.channel_map = hdev->le_adv_channel_map;
1121 
1122 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1123 
1124 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1125 }
1126 
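/* Append the complete local name if it fits, otherwise the short name or
 * a truncated copy of the complete name. Returns the new data length.
 */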
1127 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1128 {
1129 	size_t short_len;
1130 	size_t complete_len;
1131 
1132 	/* no space left for name (+ NULL + type + len) */
1133 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1134 		return ad_len;
1135 
1136 	/* use complete name if present and fits */
1137 	complete_len = strlen(hdev->dev_name);
1138 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1139 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1140 				       hdev->dev_name, complete_len + 1);
1141 
1142 	/* use short name if present */
1143 	short_len = strlen(hdev->short_name);
1144 	if (short_len)
1145 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1146 				       hdev->short_name, short_len + 1);
1147 
1148 	/* use shortened full name if present, we already know that name
1149 	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1150 	 */
1151 	if (complete_len) {
1152 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1153 
1154 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1155 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1156 
1157 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1158 				       sizeof(name));
1159 	}
1160 
1161 	return ad_len;
1162 }
1163 
1164 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1165 {
1166 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1167 }
1168 
1169 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1170 {
1171 	u8 scan_rsp_len = 0;
1172 
1173 	if (hdev->appearance) {
1174 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1175 	}
1176 
1177 	return append_local_name(hdev, ptr, scan_rsp_len);
1178 }
1179 
1180 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1181 					u8 *ptr)
1182 {
1183 	struct adv_info *adv_instance;
1184 	u32 instance_flags;
1185 	u8 scan_rsp_len = 0;
1186 
1187 	adv_instance = hci_find_adv_instance(hdev, instance);
1188 	if (!adv_instance)
1189 		return 0;
1190 
1191 	instance_flags = adv_instance->flags;
1192 
1193 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1194 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1195 	}
1196 
1197 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1198 	       adv_instance->scan_rsp_len);
1199 
1200 	scan_rsp_len += adv_instance->scan_rsp_len;
1201 
1202 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1203 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1204 
1205 	return scan_rsp_len;
1206 }
1207 
1208 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1209 {
1210 	struct hci_dev *hdev = req->hdev;
1211 	u8 len;
1212 
1213 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1214 		return;
1215 
1216 	if (ext_adv_capable(hdev)) {
1217 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1218 
1219 		memset(&cp, 0, sizeof(cp));
1220 
1221 		if (instance)
1222 			len = create_instance_scan_rsp_data(hdev, instance,
1223 							    cp.data);
1224 		else
1225 			len = create_default_scan_rsp_data(hdev, cp.data);
1226 
1227 		if (hdev->scan_rsp_data_len == len &&
1228 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1229 			return;
1230 
1231 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1232 		hdev->scan_rsp_data_len = len;
1233 
1234 		cp.handle = 0;
1235 		cp.length = len;
1236 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1237 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1238 
1239 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1240 			    &cp);
1241 	} else {
1242 		struct hci_cp_le_set_scan_rsp_data cp;
1243 
1244 		memset(&cp, 0, sizeof(cp));
1245 
1246 		if (instance)
1247 			len = create_instance_scan_rsp_data(hdev, instance,
1248 							    cp.data);
1249 		else
1250 			len = create_default_scan_rsp_data(hdev, cp.data);
1251 
1252 		if (hdev->scan_rsp_data_len == len &&
1253 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1254 			return;
1255 
1256 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1257 		hdev->scan_rsp_data_len = len;
1258 
1259 		cp.length = len;
1260 
1261 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1262 	}
1263 }
1264 
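/* Build the advertising data for an instance: a Flags field (unless the
 * instance data already contains one), the instance data itself and, when
 * requested and valid, the TX power. Returns the resulting length.
 */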
1265 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1266 {
1267 	struct adv_info *adv_instance = NULL;
1268 	u8 ad_len = 0, flags = 0;
1269 	u32 instance_flags;
1270 
1271 	/* Return 0 when the current instance identifier is invalid. */
1272 	if (instance) {
1273 		adv_instance = hci_find_adv_instance(hdev, instance);
1274 		if (!adv_instance)
1275 			return 0;
1276 	}
1277 
1278 	instance_flags = get_adv_instance_flags(hdev, instance);
1279 
1280 	/* If the instance already has the flags set, skip adding them
1281 	 * again.
1282 	 */
1283 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1284 					 adv_instance->adv_data_len, EIR_FLAGS,
1285 					 NULL))
1286 		goto skip_flags;
1287 
1288 	/* The Add Advertising command allows userspace to set both the general
1289 	 * and limited discoverable flags.
1290 	 */
1291 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1292 		flags |= LE_AD_GENERAL;
1293 
1294 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1295 		flags |= LE_AD_LIMITED;
1296 
1297 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1298 		flags |= LE_AD_NO_BREDR;
1299 
1300 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1301 		/* If a discovery flag wasn't provided, simply use the global
1302 		 * settings.
1303 		 */
1304 		if (!flags)
1305 			flags |= mgmt_get_adv_discov_flags(hdev);
1306 
1307 		/* If flags would still be empty, then there is no need to
1308 		 * include the "Flags" AD field.
1309 		 */
1310 		if (flags) {
1311 			ptr[0] = 0x02;
1312 			ptr[1] = EIR_FLAGS;
1313 			ptr[2] = flags;
1314 
1315 			ad_len += 3;
1316 			ptr += 3;
1317 		}
1318 	}
1319 
1320 skip_flags:
1321 	if (adv_instance) {
1322 		memcpy(ptr, adv_instance->adv_data,
1323 		       adv_instance->adv_data_len);
1324 		ad_len += adv_instance->adv_data_len;
1325 		ptr += adv_instance->adv_data_len;
1326 	}
1327 
1328 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1329 		s8 adv_tx_power;
1330 
1331 		if (ext_adv_capable(hdev)) {
1332 			if (adv_instance)
1333 				adv_tx_power = adv_instance->tx_power;
1334 			else
1335 				adv_tx_power = hdev->adv_tx_power;
1336 		} else {
1337 			adv_tx_power = hdev->adv_tx_power;
1338 		}
1339 
1340 		/* Provide Tx Power only if we can provide a valid value for it */
1341 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1342 			ptr[0] = 0x02;
1343 			ptr[1] = EIR_TX_POWER;
1344 			ptr[2] = (u8)adv_tx_power;
1345 
1346 			ad_len += 3;
1347 			ptr += 3;
1348 		}
1349 	}
1350 
1351 	return ad_len;
1352 }
1353 
1354 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1355 {
1356 	struct hci_dev *hdev = req->hdev;
1357 	u8 len;
1358 
1359 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1360 		return;
1361 
1362 	if (ext_adv_capable(hdev)) {
1363 		struct hci_cp_le_set_ext_adv_data cp;
1364 
1365 		memset(&cp, 0, sizeof(cp));
1366 
1367 		len = create_instance_adv_data(hdev, instance, cp.data);
1368 
1369 		/* There's nothing to do if the data hasn't changed */
1370 		if (hdev->adv_data_len == len &&
1371 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1372 			return;
1373 
1374 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1375 		hdev->adv_data_len = len;
1376 
1377 		cp.length = len;
1378 		cp.handle = 0;
1379 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1380 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1381 
1382 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1383 	} else {
1384 		struct hci_cp_le_set_adv_data cp;
1385 
1386 		memset(&cp, 0, sizeof(cp));
1387 
1388 		len = create_instance_adv_data(hdev, instance, cp.data);
1389 
1390 		/* There's nothing to do if the data hasn't changed */
1391 		if (hdev->adv_data_len == len &&
1392 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1393 			return;
1394 
1395 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1396 		hdev->adv_data_len = len;
1397 
1398 		cp.length = len;
1399 
1400 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1401 	}
1402 }
1403 
1404 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1405 {
1406 	struct hci_request req;
1407 
1408 	hci_req_init(&req, hdev);
1409 	__hci_req_update_adv_data(&req, instance);
1410 
1411 	return hci_req_run(&req, NULL);
1412 }
1413 
1414 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1415 {
1416 	BT_DBG("%s status %u", hdev->name, status);
1417 }
1418 
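/* Re-enable advertising after it was turned off, either by re-scheduling
 * the current instance or by restarting the default instance 0.
 */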
1419 void hci_req_reenable_advertising(struct hci_dev *hdev)
1420 {
1421 	struct hci_request req;
1422 
1423 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1424 	    list_empty(&hdev->adv_instances))
1425 		return;
1426 
1427 	hci_req_init(&req, hdev);
1428 
1429 	if (hdev->cur_adv_instance) {
1430 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1431 						true);
1432 	} else {
1433 		if (ext_adv_capable(hdev)) {
1434 			__hci_req_start_ext_adv(&req, 0x00);
1435 		} else {
1436 			__hci_req_update_adv_data(&req, 0x00);
1437 			__hci_req_update_scan_rsp_data(&req, 0x00);
1438 			__hci_req_enable_advertising(&req);
1439 		}
1440 	}
1441 
1442 	hci_req_run(&req, adv_enable_complete);
1443 }
1444 
1445 static void adv_timeout_expire(struct work_struct *work)
1446 {
1447 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1448 					    adv_instance_expire.work);
1449 
1450 	struct hci_request req;
1451 	u8 instance;
1452 
1453 	BT_DBG("%s", hdev->name);
1454 
1455 	hci_dev_lock(hdev);
1456 
1457 	hdev->adv_instance_timeout = 0;
1458 
1459 	instance = hdev->cur_adv_instance;
1460 	if (instance == 0x00)
1461 		goto unlock;
1462 
1463 	hci_req_init(&req, hdev);
1464 
1465 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1466 
1467 	if (list_empty(&hdev->adv_instances))
1468 		__hci_req_disable_advertising(&req);
1469 
1470 	hci_req_run(&req, NULL);
1471 
1472 unlock:
1473 	hci_dev_unlock(hdev);
1474 }
1475 
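/* Pick an own address type and random address for advertising: an RPA when
 * privacy allows it, a fresh non-resolvable address when privacy is
 * required without an RPA, and the public address otherwise.
 */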
1476 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1477 			   bool use_rpa, struct adv_info *adv_instance,
1478 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1479 {
1480 	int err;
1481 
1482 	bacpy(rand_addr, BDADDR_ANY);
1483 
1484 	/* If privacy is enabled use a resolvable private address. If
1485 	 * current RPA has expired then generate a new one.
1486 	 */
1487 	if (use_rpa) {
1488 		int to;
1489 
1490 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1491 
1492 		if (adv_instance) {
1493 			if (!adv_instance->rpa_expired &&
1494 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1495 				return 0;
1496 
1497 			adv_instance->rpa_expired = false;
1498 		} else {
1499 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1500 			    !bacmp(&hdev->random_addr, &hdev->rpa))
1501 				return 0;
1502 		}
1503 
1504 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1505 		if (err < 0) {
1506 			BT_ERR("%s failed to generate new RPA", hdev->name);
1507 			return err;
1508 		}
1509 
1510 		bacpy(rand_addr, &hdev->rpa);
1511 
1512 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1513 		if (adv_instance)
1514 			queue_delayed_work(hdev->workqueue,
1515 					   &adv_instance->rpa_expired_cb, to);
1516 		else
1517 			queue_delayed_work(hdev->workqueue,
1518 					   &hdev->rpa_expired, to);
1519 
1520 		return 0;
1521 	}
1522 
1523 	/* In case of required privacy without a resolvable private address,
1524 	 * use a non-resolvable private address. This is useful for
1525 	 * non-connectable advertising.
1526 	 */
1527 	if (require_privacy) {
1528 		bdaddr_t nrpa;
1529 
1530 		while (true) {
1531 			/* The non-resolvable private address is generated
1532 			 * from six random bytes with the two most significant
1533 			 * bits cleared.
1534 			 */
1535 			get_random_bytes(&nrpa, 6);
1536 			nrpa.b[5] &= 0x3f;
1537 
1538 			/* The non-resolvable private address shall not be
1539 			 * equal to the public address.
1540 			 */
1541 			if (bacmp(&hdev->bdaddr, &nrpa))
1542 				break;
1543 		}
1544 
1545 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1546 		bacpy(rand_addr, &nrpa);
1547 
1548 		return 0;
1549 	}
1550 
1551 	/* No privacy so use a public address. */
1552 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1553 
1554 	return 0;
1555 }
1556 
1557 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1558 {
1559 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1560 }
1561 
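/* Queue the extended advertising parameter command for an instance and,
 * when a random own address is used and has changed, the command to
 * program it for the advertising set.
 */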
1562 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1563 {
1564 	struct hci_cp_le_set_ext_adv_params cp;
1565 	struct hci_dev *hdev = req->hdev;
1566 	bool connectable;
1567 	u32 flags;
1568 	bdaddr_t random_addr;
1569 	u8 own_addr_type;
1570 	int err;
1571 	struct adv_info *adv_instance;
1572 	bool secondary_adv;
1573 	/* In the ext adv set param command the interval is 3 octets */
1574 	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1575 
1576 	if (instance > 0) {
1577 		adv_instance = hci_find_adv_instance(hdev, instance);
1578 		if (!adv_instance)
1579 			return -EINVAL;
1580 	} else {
1581 		adv_instance = NULL;
1582 	}
1583 
1584 	flags = get_adv_instance_flags(hdev, instance);
1585 
1586 	/* If the "connectable" instance flag was not set, then choose between
1587 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1588 	 */
1589 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1590 		      mgmt_get_connectable(hdev);
1591 
1592 	if (!is_advertising_allowed(hdev, connectable))
1593 		return -EPERM;
1594 
1595 	/* Set require_privacy to true only when non-connectable
1596 	 * advertising is used. In that case it is fine to use a
1597 	 * non-resolvable private address.
1598 	 */
1599 	err = hci_get_random_address(hdev, !connectable,
1600 				     adv_use_rpa(hdev, flags), adv_instance,
1601 				     &own_addr_type, &random_addr);
1602 	if (err < 0)
1603 		return err;
1604 
1605 	memset(&cp, 0, sizeof(cp));
1606 
1607 	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1608 	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1609 
1610 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1611 
1612 	if (connectable) {
1613 		if (secondary_adv)
1614 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1615 		else
1616 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1617 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1618 		if (secondary_adv)
1619 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1620 		else
1621 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1622 	} else {
1623 		if (secondary_adv)
1624 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1625 		else
1626 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1627 	}
1628 
1629 	cp.own_addr_type = own_addr_type;
1630 	cp.channel_map = hdev->le_adv_channel_map;
1631 	cp.tx_power = 127;
1632 	cp.handle = instance;
1633 
1634 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1635 		cp.primary_phy = HCI_ADV_PHY_1M;
1636 		cp.secondary_phy = HCI_ADV_PHY_2M;
1637 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1638 		cp.primary_phy = HCI_ADV_PHY_CODED;
1639 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1640 	} else {
1641 		/* In all other cases use 1M */
1642 		cp.primary_phy = HCI_ADV_PHY_1M;
1643 		cp.secondary_phy = HCI_ADV_PHY_1M;
1644 	}
1645 
1646 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1647 
1648 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1649 	    bacmp(&random_addr, BDADDR_ANY)) {
1650 		struct hci_cp_le_set_adv_set_rand_addr cp;
1651 
1652 		/* Check if the random address needs to be updated */
1653 		if (adv_instance) {
1654 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1655 				return 0;
1656 		} else {
1657 			if (!bacmp(&random_addr, &hdev->random_addr))
1658 				return 0;
1659 		}
1660 
1661 		memset(&cp, 0, sizeof(cp));
1662 
1663 		cp.handle = 0;
1664 		bacpy(&cp.bdaddr, &random_addr);
1665 
1666 		hci_req_add(req,
1667 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1668 			    sizeof(cp), &cp);
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1675 {
1676 	struct hci_dev *hdev = req->hdev;
1677 	struct hci_cp_le_set_ext_adv_enable *cp;
1678 	struct hci_cp_ext_adv_set *adv_set;
1679 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1680 	struct adv_info *adv_instance;
1681 
1682 	if (instance > 0) {
1683 		adv_instance = hci_find_adv_instance(hdev, instance);
1684 		if (!adv_instance)
1685 			return -EINVAL;
1686 	} else {
1687 		adv_instance = NULL;
1688 	}
1689 
1690 	cp = (void *) data;
1691 	adv_set = (void *) cp->data;
1692 
1693 	memset(cp, 0, sizeof(*cp));
1694 
1695 	cp->enable = 0x01;
1696 	cp->num_of_sets = 0x01;
1697 
1698 	memset(adv_set, 0, sizeof(*adv_set));
1699 
1700 	adv_set->handle = instance;
1701 
1702 	/* Set duration per instance since controller is responsible for
1703 	 * scheduling it.
1704 	 */
1705 	if (adv_instance && adv_instance->duration) {
1706 		u16 duration = adv_instance->duration * MSEC_PER_SEC;
1707 
1708 		/* Time = N * 10 ms */
1709 		adv_set->duration = cpu_to_le16(duration / 10);
1710 	}
1711 
1712 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1713 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1714 		    data);
1715 
1716 	return 0;
1717 }
1718 
1719 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1720 {
1721 	struct hci_dev *hdev = req->hdev;
1722 	int err;
1723 
1724 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1725 		__hci_req_disable_advertising(req);
1726 
1727 	err = __hci_req_setup_ext_adv_instance(req, instance);
1728 	if (err < 0)
1729 		return err;
1730 
1731 	__hci_req_update_scan_rsp_data(req, instance);
1732 	__hci_req_enable_ext_advertising(req, instance);
1733 
1734 	return 0;
1735 }
1736 
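/* Queue the commands to start advertising the given instance and, for
 * legacy advertising, arm the instance expiry work for its duration.
 */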
1737 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1738 				    bool force)
1739 {
1740 	struct hci_dev *hdev = req->hdev;
1741 	struct adv_info *adv_instance = NULL;
1742 	u16 timeout;
1743 
1744 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1745 	    list_empty(&hdev->adv_instances))
1746 		return -EPERM;
1747 
1748 	if (hdev->adv_instance_timeout)
1749 		return -EBUSY;
1750 
1751 	adv_instance = hci_find_adv_instance(hdev, instance);
1752 	if (!adv_instance)
1753 		return -ENOENT;
1754 
1755 	/* A zero timeout means unlimited advertising. As long as there is
1756 	 * only one instance, duration should be ignored. We still set a timeout
1757 	 * in case further instances are being added later on.
1758 	 *
1759 	 * If the remaining lifetime of the instance is more than the duration
1760 	 * then the timeout corresponds to the duration, otherwise it will be
1761 	 * reduced to the remaining instance lifetime.
1762 	 */
1763 	if (adv_instance->timeout == 0 ||
1764 	    adv_instance->duration <= adv_instance->remaining_time)
1765 		timeout = adv_instance->duration;
1766 	else
1767 		timeout = adv_instance->remaining_time;
1768 
1769 	/* The remaining time is being reduced unless the instance is being
1770 	 * advertised without time limit.
1771 	 */
1772 	if (adv_instance->timeout)
1773 		adv_instance->remaining_time =
1774 				adv_instance->remaining_time - timeout;
1775 
1776 	/* Only use work for scheduling instances with legacy advertising */
1777 	if (!ext_adv_capable(hdev)) {
1778 		hdev->adv_instance_timeout = timeout;
1779 		queue_delayed_work(hdev->req_workqueue,
1780 			   &hdev->adv_instance_expire,
1781 			   msecs_to_jiffies(timeout * 1000));
1782 	}
1783 
1784 	/* If we're just re-scheduling the same instance again then do not
1785 	 * execute any HCI commands. This happens when a single instance is
1786 	 * being advertised.
1787 	 */
1788 	if (!force && hdev->cur_adv_instance == instance &&
1789 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1790 		return 0;
1791 
1792 	hdev->cur_adv_instance = instance;
1793 	if (ext_adv_capable(hdev)) {
1794 		__hci_req_start_ext_adv(req, instance);
1795 	} else {
1796 		__hci_req_update_adv_data(req, instance);
1797 		__hci_req_update_scan_rsp_data(req, instance);
1798 		__hci_req_enable_advertising(req);
1799 	}
1800 
1801 	return 0;
1802 }
1803 
1804 static void cancel_adv_timeout(struct hci_dev *hdev)
1805 {
1806 	if (hdev->adv_instance_timeout) {
1807 		hdev->adv_instance_timeout = 0;
1808 		cancel_delayed_work(&hdev->adv_instance_expire);
1809 	}
1810 }
1811 
1812 /* For a single instance:
1813  * - force == true: The instance will be removed even when its remaining
1814  *   lifetime is not zero.
1815  * - force == false: the instance will be deactivated but kept stored unless
1816  *   the remaining lifetime is zero.
1817  *
1818  * For instance == 0x00:
1819  * - force == true: All instances will be removed regardless of their timeout
1820  *   setting.
1821  * - force == false: Only instances that have a timeout will be removed.
1822  */
1823 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1824 				struct hci_request *req, u8 instance,
1825 				bool force)
1826 {
1827 	struct adv_info *adv_instance, *n, *next_instance = NULL;
1828 	int err;
1829 	u8 rem_inst;
1830 
1831 	/* Cancel any timeout concerning the removed instance(s). */
1832 	if (!instance || hdev->cur_adv_instance == instance)
1833 		cancel_adv_timeout(hdev);
1834 
1835 	/* Get the next instance to advertise BEFORE we remove
1836 	 * the current one. This can be the same instance again
1837 	 * if there is only one instance.
1838 	 */
1839 	if (instance && hdev->cur_adv_instance == instance)
1840 		next_instance = hci_get_next_instance(hdev, instance);
1841 
1842 	if (instance == 0x00) {
1843 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1844 					 list) {
1845 			if (!(force || adv_instance->timeout))
1846 				continue;
1847 
1848 			rem_inst = adv_instance->instance;
1849 			err = hci_remove_adv_instance(hdev, rem_inst);
1850 			if (!err)
1851 				mgmt_advertising_removed(sk, hdev, rem_inst);
1852 		}
1853 	} else {
1854 		adv_instance = hci_find_adv_instance(hdev, instance);
1855 
1856 		if (force || (adv_instance && adv_instance->timeout &&
1857 			      !adv_instance->remaining_time)) {
1858 			/* Don't advertise a removed instance. */
1859 			if (next_instance &&
1860 			    next_instance->instance == instance)
1861 				next_instance = NULL;
1862 
1863 			err = hci_remove_adv_instance(hdev, instance);
1864 			if (!err)
1865 				mgmt_advertising_removed(sk, hdev, instance);
1866 		}
1867 	}
1868 
1869 	if (!req || !hdev_is_powered(hdev) ||
1870 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1871 		return;
1872 
1873 	if (next_instance)
1874 		__hci_req_schedule_adv_instance(req, next_instance->instance,
1875 						false);
1876 }
1877 
1878 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1879 {
1880 	struct hci_dev *hdev = req->hdev;
1881 
1882 	/* If we're advertising or initiating an LE connection we can't
1883 	 * go ahead and change the random address at this time. This is
1884 	 * because the eventual initiator address used for the
1885 	 * subsequently created connection will be undefined (some
1886 	 * controllers use the new address and others the one we had
1887 	 * when the operation started).
1888 	 *
1889 	 * In this kind of scenario skip the update and let the random
1890 	 * address be updated at the next cycle.
1891 	 */
1892 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1893 	    hci_lookup_le_connect(hdev)) {
1894 		BT_DBG("Deferring random address update");
1895 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1896 		return;
1897 	}
1898 
1899 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1900 }
1901 
1902 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1903 			      bool use_rpa, u8 *own_addr_type)
1904 {
1905 	struct hci_dev *hdev = req->hdev;
1906 	int err;
1907 
1908 	/* If privacy is enabled use a resolvable private address. If
1909 	 * the current RPA has expired or an address other than the current
1910 	 * RPA is in use, then generate a new one.
1911 	 */
1912 	if (use_rpa) {
1913 		int to;
1914 
1915 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1916 
1917 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1918 		    !bacmp(&hdev->random_addr, &hdev->rpa))
1919 			return 0;
1920 
1921 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1922 		if (err < 0) {
1923 			bt_dev_err(hdev, "failed to generate new RPA");
1924 			return err;
1925 		}
1926 
1927 		set_random_addr(req, &hdev->rpa);
1928 
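		/* rpa_timeout is kept in seconds; queue the expiry work so the
		 * RPA is marked as expired once that interval elapses.
		 */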
1929 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1930 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1931 
1932 		return 0;
1933 	}
1934 
1935 	/* In case of required privacy without resolvable private address,
1936 	 * use a non-resolvable private address. This is useful for active
1937 	 * scanning and non-connectable advertising.
1938 	 */
1939 	if (require_privacy) {
1940 		bdaddr_t nrpa;
1941 
1942 		while (true) {
1943 			/* The non-resolvable private address is generated
1944 			 * from six random bytes with the two most significant
1945 			 * bits cleared.
1946 			 */
1947 			get_random_bytes(&nrpa, 6);
1948 			nrpa.b[5] &= 0x3f;
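			/* bdaddr_t is stored little-endian, so b[5] is the
			 * most significant octet; masking with 0x3f clears
			 * its two most significant bits as required for a
			 * non-resolvable private address.
			 */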
1949 
1950 			/* The non-resolvable private address shall not be
1951 			 * equal to the public address.
1952 			 */
1953 			if (bacmp(&hdev->bdaddr, &nrpa))
1954 				break;
1955 		}
1956 
1957 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1958 		set_random_addr(req, &nrpa);
1959 		return 0;
1960 	}
1961 
1962 	/* If forcing static address is in use or there is no public
1963 	 * address use the static address as random address (but skip
1964 	 * the HCI command if the current random address is already the
1965 	 * static one).
1966 	 *
1967 	 * In case BR/EDR has been disabled on a dual-mode controller
1968 	 * and a static address has been configured, then use that
1969 	 * address instead of the public BR/EDR address.
1970 	 */
1971 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1972 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1973 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1974 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1975 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1976 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1977 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1978 				    &hdev->static_addr);
1979 		return 0;
1980 	}
1981 
1982 	/* Neither privacy nor static address is being used so use a
1983 	 * public address.
1984 	 */
1985 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1986 
1987 	return 0;
1988 }
1989 
1990 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1991 {
1992 	struct bdaddr_list *b;
1993 
1994 	list_for_each_entry(b, &hdev->whitelist, list) {
1995 		struct hci_conn *conn;
1996 
1997 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1998 		if (!conn)
1999 			return true;
2000 
2001 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2002 			return true;
2003 	}
2004 
2005 	return false;
2006 }
2007 
2008 void __hci_req_update_scan(struct hci_request *req)
2009 {
2010 	struct hci_dev *hdev = req->hdev;
2011 	u8 scan;
2012 
2013 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2014 		return;
2015 
2016 	if (!hdev_is_powered(hdev))
2017 		return;
2018 
2019 	if (mgmt_powering_down(hdev))
2020 		return;
2021 
2022 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2023 	    disconnected_whitelist_entries(hdev))
2024 		scan = SCAN_PAGE;
2025 	else
2026 		scan = SCAN_DISABLED;
2027 
2028 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2029 		scan |= SCAN_INQUIRY;
2030 
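	/* Skip the HCI command if the current page/inquiry scan state
	 * already matches the desired setting.
	 */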
2031 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2032 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2033 		return;
2034 
2035 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2036 }
2037 
2038 static int update_scan(struct hci_request *req, unsigned long opt)
2039 {
2040 	hci_dev_lock(req->hdev);
2041 	__hci_req_update_scan(req);
2042 	hci_dev_unlock(req->hdev);
2043 	return 0;
2044 }
2045 
2046 static void scan_update_work(struct work_struct *work)
2047 {
2048 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2049 
2050 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2051 }
2052 
2053 static int connectable_update(struct hci_request *req, unsigned long opt)
2054 {
2055 	struct hci_dev *hdev = req->hdev;
2056 
2057 	hci_dev_lock(hdev);
2058 
2059 	__hci_req_update_scan(req);
2060 
2061 	/* If BR/EDR is not enabled and we disable advertising as a
2062 	 * by-product of disabling connectable, we need to update the
2063 	 * advertising flags.
2064 	 */
2065 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2066 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2067 
2068 	/* Update the advertising parameters if necessary */
2069 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2070 	    !list_empty(&hdev->adv_instances)) {
2071 		if (ext_adv_capable(hdev))
2072 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2073 		else
2074 			__hci_req_enable_advertising(req);
2075 	}
2076 
2077 	__hci_update_background_scan(req);
2078 
2079 	hci_dev_unlock(hdev);
2080 
2081 	return 0;
2082 }
2083 
2084 static void connectable_update_work(struct work_struct *work)
2085 {
2086 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2087 					    connectable_update);
2088 	u8 status;
2089 
2090 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2091 	mgmt_set_connectable_complete(hdev, status);
2092 }
2093 
2094 static u8 get_service_classes(struct hci_dev *hdev)
2095 {
2096 	struct bt_uuid *uuid;
2097 	u8 val = 0;
2098 
2099 	list_for_each_entry(uuid, &hdev->uuids, list)
2100 		val |= uuid->svc_hint;
2101 
2102 	return val;
2103 }
2104 
2105 void __hci_req_update_class(struct hci_request *req)
2106 {
2107 	struct hci_dev *hdev = req->hdev;
2108 	u8 cod[3];
2109 
2110 	BT_DBG("%s", hdev->name);
2111 
2112 	if (!hdev_is_powered(hdev))
2113 		return;
2114 
2115 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2116 		return;
2117 
2118 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2119 		return;
2120 
2121 	cod[0] = hdev->minor_class;
2122 	cod[1] = hdev->major_class;
2123 	cod[2] = get_service_classes(hdev);
2124 
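	/* Limited Discoverable Mode is service-class bit 13 of the CoD,
	 * i.e. bit 5 (0x20) of the middle octet.
	 */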
2125 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2126 		cod[1] |= 0x20;
2127 
2128 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2129 		return;
2130 
2131 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2132 }
2133 
2134 static void write_iac(struct hci_request *req)
2135 {
2136 	struct hci_dev *hdev = req->hdev;
2137 	struct hci_cp_write_current_iac_lap cp;
2138 
2139 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2140 		return;
2141 
2142 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2143 		/* Limited discoverable mode */
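		/* The LAPs are encoded little-endian: 0x9E8B00 (LIAC)
		 * followed by 0x9E8B33 (GIAC).
		 */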
2144 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2145 		cp.iac_lap[0] = 0x00;	/* LIAC */
2146 		cp.iac_lap[1] = 0x8b;
2147 		cp.iac_lap[2] = 0x9e;
2148 		cp.iac_lap[3] = 0x33;	/* GIAC */
2149 		cp.iac_lap[4] = 0x8b;
2150 		cp.iac_lap[5] = 0x9e;
2151 	} else {
2152 		/* General discoverable mode */
2153 		cp.num_iac = 1;
2154 		cp.iac_lap[0] = 0x33;	/* GIAC */
2155 		cp.iac_lap[1] = 0x8b;
2156 		cp.iac_lap[2] = 0x9e;
2157 	}
2158 
2159 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2160 		    (cp.num_iac * 3) + 1, &cp);
2161 }
2162 
2163 static int discoverable_update(struct hci_request *req, unsigned long opt)
2164 {
2165 	struct hci_dev *hdev = req->hdev;
2166 
2167 	hci_dev_lock(hdev);
2168 
2169 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2170 		write_iac(req);
2171 		__hci_req_update_scan(req);
2172 		__hci_req_update_class(req);
2173 	}
2174 
2175 	/* Advertising instances don't use the global discoverable setting, so
2176 	 * only update AD if advertising was enabled using Set Advertising.
2177 	 */
2178 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2179 		__hci_req_update_adv_data(req, 0x00);
2180 
2181 		/* Discoverable mode affects the local advertising
2182 		 * address in limited privacy mode.
2183 		 */
2184 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2185 			if (ext_adv_capable(hdev))
2186 				__hci_req_start_ext_adv(req, 0x00);
2187 			else
2188 				__hci_req_enable_advertising(req);
2189 		}
2190 	}
2191 
2192 	hci_dev_unlock(hdev);
2193 
2194 	return 0;
2195 }
2196 
2197 static void discoverable_update_work(struct work_struct *work)
2198 {
2199 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2200 					    discoverable_update);
2201 	u8 status;
2202 
2203 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2204 	mgmt_set_discoverable_complete(hdev, status);
2205 }
2206 
2207 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2208 		      u8 reason)
2209 {
2210 	switch (conn->state) {
2211 	case BT_CONNECTED:
2212 	case BT_CONFIG:
2213 		if (conn->type == AMP_LINK) {
2214 			struct hci_cp_disconn_phy_link cp;
2215 
2216 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2217 			cp.reason = reason;
2218 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2219 				    &cp);
2220 		} else {
2221 			struct hci_cp_disconnect dc;
2222 
2223 			dc.handle = cpu_to_le16(conn->handle);
2224 			dc.reason = reason;
2225 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2226 		}
2227 
2228 		conn->state = BT_DISCONN;
2229 
2230 		break;
2231 	case BT_CONNECT:
2232 		if (conn->type == LE_LINK) {
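			/* While the connection is still in the scanning
			 * stage no LE Create Connection has been sent, so
			 * there is nothing to cancel.
			 */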
2233 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2234 				break;
2235 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2236 				    0, NULL);
2237 		} else if (conn->type == ACL_LINK) {
2238 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2239 				break;
2240 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2241 				    6, &conn->dst);
2242 		}
2243 		break;
2244 	case BT_CONNECT2:
2245 		if (conn->type == ACL_LINK) {
2246 			struct hci_cp_reject_conn_req rej;
2247 
2248 			bacpy(&rej.bdaddr, &conn->dst);
2249 			rej.reason = reason;
2250 
2251 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2252 				    sizeof(rej), &rej);
2253 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2254 			struct hci_cp_reject_sync_conn_req rej;
2255 
2256 			bacpy(&rej.bdaddr, &conn->dst);
2257 
2258 			/* SCO rejection has its own limited set of
2259 			 * allowed error values (0x0D-0x0F) which isn't
2260 			 * compatible with most values passed to this
2261 			 * function. To be safe hard-code one of the
2262 			 * values that's suitable for SCO.
2263 			 */
2264 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2265 
2266 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2267 				    sizeof(rej), &rej);
2268 		}
2269 		break;
2270 	default:
2271 		conn->state = BT_CLOSED;
2272 		break;
2273 	}
2274 }
2275 
2276 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2277 {
2278 	if (status)
2279 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2280 }
2281 
2282 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2283 {
2284 	struct hci_request req;
2285 	int err;
2286 
2287 	hci_req_init(&req, conn->hdev);
2288 
2289 	__hci_abort_conn(&req, conn, reason);
2290 
2291 	err = hci_req_run(&req, abort_conn_complete);
2292 	if (err && err != -ENODATA) {
2293 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2294 		return err;
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2301 {
2302 	hci_dev_lock(req->hdev);
2303 	__hci_update_background_scan(req);
2304 	hci_dev_unlock(req->hdev);
2305 	return 0;
2306 }
2307 
2308 static void bg_scan_update(struct work_struct *work)
2309 {
2310 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2311 					    bg_scan_update);
2312 	struct hci_conn *conn;
2313 	u8 status;
2314 	int err;
2315 
2316 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2317 	if (!err)
2318 		return;
2319 
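	/* The background scan update failed; fail any LE connection
	 * attempt that was waiting on it.
	 */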
2320 	hci_dev_lock(hdev);
2321 
2322 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2323 	if (conn)
2324 		hci_le_conn_failed(conn, status);
2325 
2326 	hci_dev_unlock(hdev);
2327 }
2328 
2329 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2330 {
2331 	hci_req_add_le_scan_disable(req);
2332 	return 0;
2333 }
2334 
2335 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2336 {
2337 	u8 length = opt;
2338 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2339 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2340 	struct hci_cp_inquiry cp;
2341 
2342 	BT_DBG("%s", req->hdev->name);
2343 
2344 	hci_dev_lock(req->hdev);
2345 	hci_inquiry_cache_flush(req->hdev);
2346 	hci_dev_unlock(req->hdev);
2347 
2348 	memset(&cp, 0, sizeof(cp));
2349 
2350 	if (req->hdev->discovery.limited)
2351 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2352 	else
2353 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2354 
2355 	cp.length = length;
2356 
2357 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2358 
2359 	return 0;
2360 }
2361 
2362 static void le_scan_disable_work(struct work_struct *work)
2363 {
2364 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2365 					    le_scan_disable.work);
2366 	u8 status;
2367 
2368 	BT_DBG("%s", hdev->name);
2369 
2370 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2371 		return;
2372 
2373 	cancel_delayed_work(&hdev->le_scan_restart);
2374 
2375 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2376 	if (status) {
2377 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2378 			   status);
2379 		return;
2380 	}
2381 
2382 	hdev->discovery.scan_start = 0;
2383 
2384 	/* If we were running LE only scan, change discovery state. If
2385 	 * we were running both LE and BR/EDR inquiry simultaneously,
2386 	 * and BR/EDR inquiry is already finished, stop discovery,
2387 	 * otherwise BR/EDR inquiry will stop discovery when finished.
2388 	 * If we are going to resolve a remote device name, do not
2389 	 * change the discovery state.
2390 	 */
2391 
2392 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2393 		goto discov_stopped;
2394 
2395 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2396 		return;
2397 
2398 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2399 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2400 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2401 			goto discov_stopped;
2402 
2403 		return;
2404 	}
2405 
2406 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2407 		     HCI_CMD_TIMEOUT, &status);
2408 	if (status) {
2409 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2410 		goto discov_stopped;
2411 	}
2412 
2413 	return;
2414 
2415 discov_stopped:
2416 	hci_dev_lock(hdev);
2417 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2418 	hci_dev_unlock(hdev);
2419 }
2420 
2421 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2422 {
2423 	struct hci_dev *hdev = req->hdev;
2424 
2425 	/* If the controller is not scanning, we are done. */
2426 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2427 		return 0;
2428 
2429 	hci_req_add_le_scan_disable(req);
2430 
2431 	if (use_ext_scan(hdev)) {
2432 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2433 
2434 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2435 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2436 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2437 
2438 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2439 			    sizeof(ext_enable_cp), &ext_enable_cp);
2440 	} else {
2441 		struct hci_cp_le_set_scan_enable cp;
2442 
2443 		memset(&cp, 0, sizeof(cp));
2444 		cp.enable = LE_SCAN_ENABLE;
2445 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2446 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2447 	}
2448 
2449 	return 0;
2450 }
2451 
2452 static void le_scan_restart_work(struct work_struct *work)
2453 {
2454 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2455 					    le_scan_restart.work);
2456 	unsigned long timeout, duration, scan_start, now;
2457 	u8 status;
2458 
2459 	BT_DBG("%s", hdev->name);
2460 
2461 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2462 	if (status) {
2463 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2464 			   status);
2465 		return;
2466 	}
2467 
2468 	hci_dev_lock(hdev);
2469 
2470 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2471 	    !hdev->discovery.scan_start)
2472 		goto unlock;
2473 
2474 	/* When the scan was started, hdev->le_scan_disable was queued to run
2475 	 * 'duration' after scan_start. That work was canceled during the scan
2476 	 * restart, so queue it again with the remaining timeout to make sure
2477 	 * the scan does not run indefinitely.
2478 	 */
2479 	duration = hdev->discovery.scan_duration;
2480 	scan_start = hdev->discovery.scan_start;
2481 	now = jiffies;
2482 	if (now - scan_start <= duration) {
2483 		int elapsed;
2484 
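		/* Compute the elapsed time, accounting for a possible
		 * jiffies wraparound between scan_start and now.
		 */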
2485 		if (now >= scan_start)
2486 			elapsed = now - scan_start;
2487 		else
2488 			elapsed = ULONG_MAX - scan_start + now;
2489 
2490 		timeout = duration - elapsed;
2491 	} else {
2492 		timeout = 0;
2493 	}
2494 
2495 	queue_delayed_work(hdev->req_workqueue,
2496 			   &hdev->le_scan_disable, timeout);
2497 
2498 unlock:
2499 	hci_dev_unlock(hdev);
2500 }
2501 
2502 static int active_scan(struct hci_request *req, unsigned long opt)
2503 {
2504 	uint16_t interval = opt;
2505 	struct hci_dev *hdev = req->hdev;
2506 	u8 own_addr_type;
2507 	int err;
2508 
2509 	BT_DBG("%s", hdev->name);
2510 
2511 	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2512 		hci_dev_lock(hdev);
2513 
2514 		/* Don't let discovery abort an outgoing connection attempt
2515 		 * that's using directed advertising.
2516 		 */
2517 		if (hci_lookup_le_connect(hdev)) {
2518 			hci_dev_unlock(hdev);
2519 			return -EBUSY;
2520 		}
2521 
2522 		cancel_adv_timeout(hdev);
2523 		hci_dev_unlock(hdev);
2524 
2525 		__hci_req_disable_advertising(req);
2526 	}
2527 
2528 	/* If the controller is already scanning, the background scan is
2529 	 * running. Temporarily stop it so the discovery scan parameters
2530 	 * can be set.
2531 	 */
2532 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2533 		hci_req_add_le_scan_disable(req);
2534 
2535 	/* All active scans will be done with either a resolvable private
2536 	 * address (when privacy feature has been enabled) or non-resolvable
2537 	 * private address.
2538 	 */
2539 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2540 					&own_addr_type);
2541 	if (err < 0)
2542 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2543 
2544 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2545 			   own_addr_type, 0);
2546 	return 0;
2547 }
2548 
2549 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2550 {
2551 	int err;
2552 
2553 	BT_DBG("%s", req->hdev->name);
2554 
2555 	err = active_scan(req, opt);
2556 	if (err)
2557 		return err;
2558 
2559 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2560 }
2561 
2562 static void start_discovery(struct hci_dev *hdev, u8 *status)
2563 {
2564 	unsigned long timeout;
2565 
2566 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2567 
2568 	switch (hdev->discovery.type) {
2569 	case DISCOV_TYPE_BREDR:
2570 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2571 			hci_req_sync(hdev, bredr_inquiry,
2572 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2573 				     status);
2574 		return;
2575 	case DISCOV_TYPE_INTERLEAVED:
2576 		/* When running simultaneous discovery, the LE scanning time
2577 		 * should occupy the whole discovery time since BR/EDR inquiry
2578 		 * and LE scanning are scheduled by the controller.
2579 		 *
2580 		 * For interleaved discovery, in comparison, BR/EDR inquiry
2581 		 * and LE scanning are done sequentially with separate
2582 		 * timeouts.
2583 		 */
2584 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2585 			     &hdev->quirks)) {
2586 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2587 			/* During simultaneous discovery, we double the LE scan
2588 			 * interval to leave the controller enough time to do
2589 			 * BR/EDR inquiry.
2590 			 */
2591 			hci_req_sync(hdev, interleaved_discov,
2592 				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2593 				     status);
2594 			break;
2595 		}
2596 
2597 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2598 		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2599 			     HCI_CMD_TIMEOUT, status);
2600 		break;
2601 	case DISCOV_TYPE_LE:
2602 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2603 		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2604 			     HCI_CMD_TIMEOUT, status);
2605 		break;
2606 	default:
2607 		*status = HCI_ERROR_UNSPECIFIED;
2608 		return;
2609 	}
2610 
2611 	if (*status)
2612 		return;
2613 
2614 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2615 
2616 	/* When service discovery is used and the controller has a
2617 	 * strict duplicate filter, it is important to remember the
2618 	 * start and duration of the scan. This is required for
2619 	 * restarting scanning during the discovery phase.
2620 	 */
2621 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2622 		     hdev->discovery.result_filtering) {
2623 		hdev->discovery.scan_start = jiffies;
2624 		hdev->discovery.scan_duration = timeout;
2625 	}
2626 
2627 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2628 			   timeout);
2629 }
2630 
2631 bool hci_req_stop_discovery(struct hci_request *req)
2632 {
2633 	struct hci_dev *hdev = req->hdev;
2634 	struct discovery_state *d = &hdev->discovery;
2635 	struct hci_cp_remote_name_req_cancel cp;
2636 	struct inquiry_entry *e;
2637 	bool ret = false;
2638 
2639 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2640 
2641 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2642 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2643 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2644 
2645 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2646 			cancel_delayed_work(&hdev->le_scan_disable);
2647 			hci_req_add_le_scan_disable(req);
2648 		}
2649 
2650 		ret = true;
2651 	} else {
2652 		/* Passive scanning */
2653 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2654 			hci_req_add_le_scan_disable(req);
2655 			ret = true;
2656 		}
2657 	}
2658 
2659 	/* No further actions needed for LE-only discovery */
2660 	if (d->type == DISCOV_TYPE_LE)
2661 		return ret;
2662 
2663 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2664 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2665 						     NAME_PENDING);
2666 		if (!e)
2667 			return ret;
2668 
2669 		bacpy(&cp.bdaddr, &e->data.bdaddr);
2670 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2671 			    &cp);
2672 		ret = true;
2673 	}
2674 
2675 	return ret;
2676 }
2677 
2678 static int stop_discovery(struct hci_request *req, unsigned long opt)
2679 {
2680 	hci_dev_lock(req->hdev);
2681 	hci_req_stop_discovery(req);
2682 	hci_dev_unlock(req->hdev);
2683 
2684 	return 0;
2685 }
2686 
2687 static void discov_update(struct work_struct *work)
2688 {
2689 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2690 					    discov_update);
2691 	u8 status = 0;
2692 
2693 	switch (hdev->discovery.state) {
2694 	case DISCOVERY_STARTING:
2695 		start_discovery(hdev, &status);
2696 		mgmt_start_discovery_complete(hdev, status);
2697 		if (status)
2698 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2699 		else
2700 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2701 		break;
2702 	case DISCOVERY_STOPPING:
2703 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2704 		mgmt_stop_discovery_complete(hdev, status);
2705 		if (!status)
2706 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2707 		break;
2708 	case DISCOVERY_STOPPED:
2709 	default:
2710 		return;
2711 	}
2712 }
2713 
2714 static void discov_off(struct work_struct *work)
2715 {
2716 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2717 					    discov_off.work);
2718 
2719 	BT_DBG("%s", hdev->name);
2720 
2721 	hci_dev_lock(hdev);
2722 
2723 	/* When the discoverable timeout triggers, just make sure
2724 	 * the limited discoverable flag is cleared. Even in the case
2725 	 * of a timeout triggered from general discoverable, it is
2726 	 * safe to unconditionally clear the flag.
2727 	 */
2728 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2729 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2730 	hdev->discov_timeout = 0;
2731 
2732 	hci_dev_unlock(hdev);
2733 
2734 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2735 	mgmt_new_settings(hdev);
2736 }
2737 
2738 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2739 {
2740 	struct hci_dev *hdev = req->hdev;
2741 	u8 link_sec;
2742 
2743 	hci_dev_lock(hdev);
2744 
2745 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2746 	    !lmp_host_ssp_capable(hdev)) {
2747 		u8 mode = 0x01;
2748 
2749 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2750 
2751 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2752 			u8 support = 0x01;
2753 
2754 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2755 				    sizeof(support), &support);
2756 		}
2757 	}
2758 
2759 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2760 	    lmp_bredr_capable(hdev)) {
2761 		struct hci_cp_write_le_host_supported cp;
2762 
2763 		cp.le = 0x01;
2764 		cp.simul = 0x00;
2765 
2766 		/* Check first if we already have the right
2767 		 * host state (host features set)
2768 		 */
2769 		if (cp.le != lmp_host_le_capable(hdev) ||
2770 		    cp.simul != lmp_host_le_br_capable(hdev))
2771 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2772 				    sizeof(cp), &cp);
2773 	}
2774 
2775 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2776 		/* Make sure the controller has a good default for
2777 		 * advertising data. This also applies to the case
2778 		 * where BR/EDR was toggled during the AUTO_OFF phase.
2779 		 */
2780 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2781 		    list_empty(&hdev->adv_instances)) {
2782 			int err;
2783 
2784 			if (ext_adv_capable(hdev)) {
2785 				err = __hci_req_setup_ext_adv_instance(req,
2786 								       0x00);
2787 				if (!err)
2788 					__hci_req_update_scan_rsp_data(req,
2789 								       0x00);
2790 			} else {
2791 				err = 0;
2792 				__hci_req_update_adv_data(req, 0x00);
2793 				__hci_req_update_scan_rsp_data(req, 0x00);
2794 			}
2795 
2796 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2797 				if (!ext_adv_capable(hdev))
2798 					__hci_req_enable_advertising(req);
2799 				else if (!err)
2800 					__hci_req_enable_ext_advertising(req,
2801 									 0x00);
2802 			}
2803 		} else if (!list_empty(&hdev->adv_instances)) {
2804 			struct adv_info *adv_instance;
2805 
2806 			adv_instance = list_first_entry(&hdev->adv_instances,
2807 							struct adv_info, list);
2808 			__hci_req_schedule_adv_instance(req,
2809 							adv_instance->instance,
2810 							true);
2811 		}
2812 	}
2813 
2814 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2815 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2816 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2817 			    sizeof(link_sec), &link_sec);
2818 
2819 	if (lmp_bredr_capable(hdev)) {
2820 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2821 			__hci_req_write_fast_connectable(req, true);
2822 		else
2823 			__hci_req_write_fast_connectable(req, false);
2824 		__hci_req_update_scan(req);
2825 		__hci_req_update_class(req);
2826 		__hci_req_update_name(req);
2827 		__hci_req_update_eir(req);
2828 	}
2829 
2830 	hci_dev_unlock(hdev);
2831 	return 0;
2832 }
2833 
2834 int __hci_req_hci_power_on(struct hci_dev *hdev)
2835 {
2836 	/* Register the available SMP channels (BR/EDR and LE) only when
2837 	 * successfully powering on the controller. This late
2838 	 * registration is required so that LE SMP can clearly decide if
2839 	 * the public address or static address is used.
2840 	 */
2841 	smp_register(hdev);
2842 
2843 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2844 			      NULL);
2845 }
2846 
2847 void hci_request_setup(struct hci_dev *hdev)
2848 {
2849 	INIT_WORK(&hdev->discov_update, discov_update);
2850 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2851 	INIT_WORK(&hdev->scan_update, scan_update_work);
2852 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
2853 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2854 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2855 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2856 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2857 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2858 }
2859 
2860 void hci_request_cancel_all(struct hci_dev *hdev)
2861 {
2862 	hci_req_sync_cancel(hdev, ENODEV);
2863 
2864 	cancel_work_sync(&hdev->discov_update);
2865 	cancel_work_sync(&hdev->bg_scan_update);
2866 	cancel_work_sync(&hdev->scan_update);
2867 	cancel_work_sync(&hdev->connectable_update);
2868 	cancel_work_sync(&hdev->discoverable_update);
2869 	cancel_delayed_work_sync(&hdev->discov_off);
2870 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2871 	cancel_delayed_work_sync(&hdev->le_scan_restart);
2872 
2873 	if (hdev->adv_instance_timeout) {
2874 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2875 		hdev->adv_instance_timeout = 0;
2876 	}
2877 }
2878