1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 
31 #include <linux/rfkill.h>
32 
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35 
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39 
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43 
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47 
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50 
51 /* ---- HCI notifications ---- */
52 
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 	hci_sock_dev_event(hdev, event);
56 }
57 
58 /* ---- HCI requests ---- */
59 
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
63 
64 	if (hdev->req_status == HCI_REQ_PEND) {
65 		hdev->req_result = result;
66 		hdev->req_status = HCI_REQ_DONE;
67 		wake_up_interruptible(&hdev->req_wait_q);
68 	}
69 }
70 
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
74 
75 	if (hdev->req_status == HCI_REQ_PEND) {
76 		hdev->req_result = err;
77 		hdev->req_status = HCI_REQ_CANCELED;
78 		wake_up_interruptible(&hdev->req_wait_q);
79 	}
80 }
81 
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 					    u8 event)
84 {
85 	struct hci_ev_cmd_complete *ev;
86 	struct hci_event_hdr *hdr;
87 	struct sk_buff *skb;
88 
89 	hci_dev_lock(hdev);
90 
91 	skb = hdev->recv_evt;
92 	hdev->recv_evt = NULL;
93 
94 	hci_dev_unlock(hdev);
95 
96 	if (!skb)
97 		return ERR_PTR(-ENODATA);
98 
99 	if (skb->len < sizeof(*hdr)) {
100 		BT_ERR("Too short HCI event");
101 		goto failed;
102 	}
103 
104 	hdr = (void *) skb->data;
105 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
106 
107 	if (event) {
108 		if (hdr->evt != event)
109 			goto failed;
110 		return skb;
111 	}
112 
113 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 		goto failed;
116 	}
117 
118 	if (skb->len < sizeof(*ev)) {
119 		BT_ERR("Too short cmd_complete event");
120 		goto failed;
121 	}
122 
123 	ev = (void *) skb->data;
124 	skb_pull(skb, sizeof(*ev));
125 
126 	if (opcode == __le16_to_cpu(ev->opcode))
127 		return skb;
128 
129 	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 	       __le16_to_cpu(ev->opcode));
131 
132 failed:
133 	kfree_skb(skb);
134 	return ERR_PTR(-ENODATA);
135 }
136 
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 				  const void *param, u8 event, u32 timeout)
139 {
140 	DECLARE_WAITQUEUE(wait, current);
141 	struct hci_request req;
142 	int err = 0;
143 
144 	BT_DBG("%s", hdev->name);
145 
146 	hci_req_init(&req, hdev);
147 
148 	hci_req_add_ev(&req, opcode, plen, param, event);
149 
150 	hdev->req_status = HCI_REQ_PEND;
151 
152 	err = hci_req_run(&req, hci_req_sync_complete);
153 	if (err < 0)
154 		return ERR_PTR(err);
155 
156 	add_wait_queue(&hdev->req_wait_q, &wait);
157 	set_current_state(TASK_INTERRUPTIBLE);
158 
159 	schedule_timeout(timeout);
160 
161 	remove_wait_queue(&hdev->req_wait_q, &wait);
162 
163 	if (signal_pending(current))
164 		return ERR_PTR(-EINTR);
165 
166 	switch (hdev->req_status) {
167 	case HCI_REQ_DONE:
168 		err = -bt_to_errno(hdev->req_result);
169 		break;
170 
171 	case HCI_REQ_CANCELED:
172 		err = -hdev->req_result;
173 		break;
174 
175 	default:
176 		err = -ETIMEDOUT;
177 		break;
178 	}
179 
180 	hdev->req_status = hdev->req_result = 0;
181 
182 	BT_DBG("%s end: err %d", hdev->name, err);
183 
184 	if (err < 0)
185 		return ERR_PTR(err);
186 
187 	return hci_get_cmd_complete(hdev, opcode, event);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
190 
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 			       const void *param, u32 timeout)
193 {
194 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
195 }
196 EXPORT_SYMBOL(__hci_cmd_sync);
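
/* Illustrative sketch (not part of the original file): how a caller might use
 * __hci_cmd_sync() to send a single command and wait for its Command Complete
 * event. The explicit hci_req_lock() around the call is an assumption about
 * caller context; the opcode and timeout constants come from the Bluetooth
 * core headers. The function name is hypothetical.
 */
#if 0
static int example_read_bd_addr(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb now holds the Command Complete parameters for this opcode */
	kfree_skb(skb);
	return 0;
}
#endif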
197 
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 			  void (*func)(struct hci_request *req,
201 				      unsigned long opt),
202 			  unsigned long opt, __u32 timeout)
203 {
204 	struct hci_request req;
205 	DECLARE_WAITQUEUE(wait, current);
206 	int err = 0;
207 
208 	BT_DBG("%s start", hdev->name);
209 
210 	hci_req_init(&req, hdev);
211 
212 	hdev->req_status = HCI_REQ_PEND;
213 
214 	func(&req, opt);
215 
216 	err = hci_req_run(&req, hci_req_sync_complete);
217 	if (err < 0) {
218 		hdev->req_status = 0;
219 
220 		/* ENODATA means the HCI request command queue is empty.
221 		 * This can happen when a request with conditionals doesn't
222 		 * trigger any commands to be sent. This is normal behavior
223 		 * and should not trigger an error return.
224 		 */
225 		if (err == -ENODATA)
226 			return 0;
227 
228 		return err;
229 	}
230 
231 	add_wait_queue(&hdev->req_wait_q, &wait);
232 	set_current_state(TASK_INTERRUPTIBLE);
233 
234 	schedule_timeout(timeout);
235 
236 	remove_wait_queue(&hdev->req_wait_q, &wait);
237 
238 	if (signal_pending(current))
239 		return -EINTR;
240 
241 	switch (hdev->req_status) {
242 	case HCI_REQ_DONE:
243 		err = -bt_to_errno(hdev->req_result);
244 		break;
245 
246 	case HCI_REQ_CANCELED:
247 		err = -hdev->req_result;
248 		break;
249 
250 	default:
251 		err = -ETIMEDOUT;
252 		break;
253 	}
254 
255 	hdev->req_status = hdev->req_result = 0;
256 
257 	BT_DBG("%s end: err %d", hdev->name, err);
258 
259 	return err;
260 }
261 
262 static int hci_req_sync(struct hci_dev *hdev,
263 			void (*req)(struct hci_request *req,
264 				    unsigned long opt),
265 			unsigned long opt, __u32 timeout)
266 {
267 	int ret;
268 
269 	if (!test_bit(HCI_UP, &hdev->flags))
270 		return -ENETDOWN;
271 
272 	/* Serialize all requests */
273 	hci_req_lock(hdev);
274 	ret = __hci_req_sync(hdev, req, opt, timeout);
275 	hci_req_unlock(hdev);
276 
277 	return ret;
278 }
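
/* Illustrative sketch (not part of the original file): building a request
 * callback and running it through hci_req_sync(). The callback only queues
 * commands on the request; hci_req_sync() serializes the run under
 * hci_req_lock() and blocks until hci_req_sync_complete() fires or the
 * timeout expires. Both function names below are hypothetical.
 */
#if 0
static void example_write_scan_enable(struct hci_request *req,
				      unsigned long opt)
{
	__u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	return hci_req_sync(hdev, example_write_scan_enable, SCAN_PAGE,
			    HCI_INIT_TIMEOUT);
}
#endif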
279 
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
281 {
282 	BT_DBG("%s %ld", req->hdev->name, opt);
283 
284 	/* Reset device */
285 	set_bit(HCI_RESET, &req->hdev->flags);
286 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
287 }
288 
289 static void bredr_init(struct hci_request *req)
290 {
291 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
292 
293 	/* Read Local Supported Features */
294 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
295 
296 	/* Read Local Version */
297 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
298 
299 	/* Read BD Address */
300 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
301 }
302 
303 static void amp_init(struct hci_request *req)
304 {
305 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
306 
307 	/* Read Local Version */
308 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
309 
310 	/* Read Local AMP Info */
311 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
312 
313 	/* Read Data Blk size */
314 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
315 }
316 
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
318 {
319 	struct hci_dev *hdev = req->hdev;
320 
321 	BT_DBG("%s %ld", hdev->name, opt);
322 
323 	/* Reset */
324 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 		hci_reset_req(req, 0);
326 
327 	switch (hdev->dev_type) {
328 	case HCI_BREDR:
329 		bredr_init(req);
330 		break;
331 
332 	case HCI_AMP:
333 		amp_init(req);
334 		break;
335 
336 	default:
337 		BT_ERR("Unknown device type %d", hdev->dev_type);
338 		break;
339 	}
340 }
341 
342 static void bredr_setup(struct hci_request *req)
343 {
344 	__le16 param;
345 	__u8 flt_type;
346 
347 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
349 
350 	/* Read Class of Device */
351 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
352 
353 	/* Read Local Name */
354 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
355 
356 	/* Read Voice Setting */
357 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
358 
359 	/* Clear Event Filters */
360 	flt_type = HCI_FLT_CLEAR_ALL;
361 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
362 
363 	/* Connection accept timeout ~20 secs */
364 	param = __constant_cpu_to_le16(0x7d00);
365 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
366 
367 	/* Read page scan parameters */
368 	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
371 	}
372 }
373 
374 static void le_setup(struct hci_request *req)
375 {
376 	struct hci_dev *hdev = req->hdev;
377 
378 	/* Read LE Buffer Size */
379 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
380 
381 	/* Read LE Local Supported Features */
382 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
383 
384 	/* Read LE Advertising Channel TX Power */
385 	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
386 
387 	/* Read LE White List Size */
388 	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
389 
390 	/* Read LE Supported States */
391 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
392 
393 	/* LE-only controllers have LE implicitly enabled */
394 	if (!lmp_bredr_capable(hdev))
395 		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
396 }
397 
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399 {
400 	if (lmp_ext_inq_capable(hdev))
401 		return 0x02;
402 
403 	if (lmp_inq_rssi_capable(hdev))
404 		return 0x01;
405 
406 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 	    hdev->lmp_subver == 0x0757)
408 		return 0x01;
409 
410 	if (hdev->manufacturer == 15) {
411 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 			return 0x01;
413 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 			return 0x01;
415 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 			return 0x01;
417 	}
418 
419 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 	    hdev->lmp_subver == 0x1805)
421 		return 0x01;
422 
423 	return 0x00;
424 }
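
/* Note (added, not in the original file): the values returned above map to
 * the HCI Write_Inquiry_Mode parameter: 0x00 = standard inquiry results,
 * 0x01 = inquiry results with RSSI, 0x02 = inquiry results with RSSI or
 * extended inquiry results. The manufacturer/revision checks downgrade
 * controllers known to misreport extended inquiry support.
 */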
425 
426 static void hci_setup_inquiry_mode(struct hci_request *req)
427 {
428 	u8 mode;
429 
430 	mode = hci_get_inquiry_mode(req->hdev);
431 
432 	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
433 }
434 
435 static void hci_setup_event_mask(struct hci_request *req)
436 {
437 	struct hci_dev *hdev = req->hdev;
438 
439 	/* The second byte is 0xff instead of 0x9f (two reserved bits
440 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 	 * command otherwise.
442 	 */
443 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
444 
445 	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 	 * any event mask for pre 1.2 devices.
447 	 */
448 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 		return;
450 
451 	if (lmp_bredr_capable(hdev)) {
452 		events[4] |= 0x01; /* Flow Specification Complete */
453 		events[4] |= 0x02; /* Inquiry Result with RSSI */
454 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 		events[5] |= 0x08; /* Synchronous Connection Complete */
456 		events[5] |= 0x10; /* Synchronous Connection Changed */
457 	}
458 
459 	if (lmp_inq_rssi_capable(hdev))
460 		events[4] |= 0x02; /* Inquiry Result with RSSI */
461 
462 	if (lmp_sniffsubr_capable(hdev))
463 		events[5] |= 0x20; /* Sniff Subrating */
464 
465 	if (lmp_pause_enc_capable(hdev))
466 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
467 
468 	if (lmp_ext_inq_capable(hdev))
469 		events[5] |= 0x40; /* Extended Inquiry Result */
470 
471 	if (lmp_no_flush_capable(hdev))
472 		events[7] |= 0x01; /* Enhanced Flush Complete */
473 
474 	if (lmp_lsto_capable(hdev))
475 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
476 
477 	if (lmp_ssp_capable(hdev)) {
478 		events[6] |= 0x01;	/* IO Capability Request */
479 		events[6] |= 0x02;	/* IO Capability Response */
480 		events[6] |= 0x04;	/* User Confirmation Request */
481 		events[6] |= 0x08;	/* User Passkey Request */
482 		events[6] |= 0x10;	/* Remote OOB Data Request */
483 		events[6] |= 0x20;	/* Simple Pairing Complete */
484 		events[7] |= 0x04;	/* User Passkey Notification */
485 		events[7] |= 0x08;	/* Keypress Notification */
486 		events[7] |= 0x10;	/* Remote Host Supported
487 					 * Features Notification
488 					 */
489 	}
490 
491 	if (lmp_le_capable(hdev))
492 		events[7] |= 0x20;	/* LE Meta-Event */
493 
494 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
495 
496 	if (lmp_le_capable(hdev)) {
497 		memset(events, 0, sizeof(events));
498 		events[0] = 0x1f;
499 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
500 			    sizeof(events), events);
501 	}
502 }
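
/* Note (added, not in the original file): the LE event mask value 0x1f set
 * above enables the first five LE meta events: LE Connection Complete,
 * LE Advertising Report, LE Connection Update Complete, LE Read Remote Used
 * Features Complete and LE Long Term Key Request.
 */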
503 
504 static void hci_init2_req(struct hci_request *req, unsigned long opt)
505 {
506 	struct hci_dev *hdev = req->hdev;
507 
508 	if (lmp_bredr_capable(hdev))
509 		bredr_setup(req);
510 
511 	if (lmp_le_capable(hdev))
512 		le_setup(req);
513 
514 	hci_setup_event_mask(req);
515 
516 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
517 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
518 
519 	if (lmp_ssp_capable(hdev)) {
520 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
521 			u8 mode = 0x01;
522 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
523 				    sizeof(mode), &mode);
524 		} else {
525 			struct hci_cp_write_eir cp;
526 
527 			memset(hdev->eir, 0, sizeof(hdev->eir));
528 			memset(&cp, 0, sizeof(cp));
529 
530 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
531 		}
532 	}
533 
534 	if (lmp_inq_rssi_capable(hdev))
535 		hci_setup_inquiry_mode(req);
536 
537 	if (lmp_inq_tx_pwr_capable(hdev))
538 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
539 
540 	if (lmp_ext_feat_capable(hdev)) {
541 		struct hci_cp_read_local_ext_features cp;
542 
543 		cp.page = 0x01;
544 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
545 			    sizeof(cp), &cp);
546 	}
547 
548 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
549 		u8 enable = 1;
550 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
551 			    &enable);
552 	}
553 }
554 
555 static void hci_setup_link_policy(struct hci_request *req)
556 {
557 	struct hci_dev *hdev = req->hdev;
558 	struct hci_cp_write_def_link_policy cp;
559 	u16 link_policy = 0;
560 
561 	if (lmp_rswitch_capable(hdev))
562 		link_policy |= HCI_LP_RSWITCH;
563 	if (lmp_hold_capable(hdev))
564 		link_policy |= HCI_LP_HOLD;
565 	if (lmp_sniff_capable(hdev))
566 		link_policy |= HCI_LP_SNIFF;
567 	if (lmp_park_capable(hdev))
568 		link_policy |= HCI_LP_PARK;
569 
570 	cp.policy = cpu_to_le16(link_policy);
571 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
572 }
573 
574 static void hci_set_le_support(struct hci_request *req)
575 {
576 	struct hci_dev *hdev = req->hdev;
577 	struct hci_cp_write_le_host_supported cp;
578 
579 	/* LE-only devices do not support explicit enablement */
580 	if (!lmp_bredr_capable(hdev))
581 		return;
582 
583 	memset(&cp, 0, sizeof(cp));
584 
585 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
586 		cp.le = 0x01;
587 		cp.simul = lmp_le_br_capable(hdev);
588 	}
589 
590 	if (cp.le != lmp_host_le_capable(hdev))
591 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
592 			    &cp);
593 }
594 
595 static void hci_init3_req(struct hci_request *req, unsigned long opt)
596 {
597 	struct hci_dev *hdev = req->hdev;
598 	u8 p;
599 
600 	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
601 	if (hdev->commands[6] & 0x80) {
602 		struct hci_cp_delete_stored_link_key cp;
603 
604 		bacpy(&cp.bdaddr, BDADDR_ANY);
605 		cp.delete_all = 0x01;
606 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
607 			    sizeof(cp), &cp);
608 	}
609 
610 	if (hdev->commands[5] & 0x10)
611 		hci_setup_link_policy(req);
612 
613 	if (lmp_le_capable(hdev)) {
614 		hci_set_le_support(req);
615 		hci_update_ad(req);
616 	}
617 
618 	/* Read features beyond page 1 if available */
619 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
620 		struct hci_cp_read_local_ext_features cp;
621 
622 		cp.page = p;
623 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
624 			    sizeof(cp), &cp);
625 	}
626 }
627 
628 static int __hci_init(struct hci_dev *hdev)
629 {
630 	int err;
631 
632 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
633 	if (err < 0)
634 		return err;
635 
636 	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
637 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
638 	 * the first stage init.
639 	 */
640 	if (hdev->dev_type != HCI_BREDR)
641 		return 0;
642 
643 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
644 	if (err < 0)
645 		return err;
646 
647 	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
648 }
649 
650 static void hci_scan_req(struct hci_request *req, unsigned long opt)
651 {
652 	__u8 scan = opt;
653 
654 	BT_DBG("%s %x", req->hdev->name, scan);
655 
656 	/* Inquiry and Page scans */
657 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
658 }
659 
660 static void hci_auth_req(struct hci_request *req, unsigned long opt)
661 {
662 	__u8 auth = opt;
663 
664 	BT_DBG("%s %x", req->hdev->name, auth);
665 
666 	/* Authentication */
667 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
668 }
669 
670 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
671 {
672 	__u8 encrypt = opt;
673 
674 	BT_DBG("%s %x", req->hdev->name, encrypt);
675 
676 	/* Encryption */
677 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
678 }
679 
680 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
681 {
682 	__le16 policy = cpu_to_le16(opt);
683 
684 	BT_DBG("%s %x", req->hdev->name, policy);
685 
686 	/* Default link policy */
687 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
688 }
689 
690 /* Get HCI device by index.
691  * Device is held on return. */
692 struct hci_dev *hci_dev_get(int index)
693 {
694 	struct hci_dev *hdev = NULL, *d;
695 
696 	BT_DBG("%d", index);
697 
698 	if (index < 0)
699 		return NULL;
700 
701 	read_lock(&hci_dev_list_lock);
702 	list_for_each_entry(d, &hci_dev_list, list) {
703 		if (d->id == index) {
704 			hdev = hci_dev_hold(d);
705 			break;
706 		}
707 	}
708 	read_unlock(&hci_dev_list_lock);
709 	return hdev;
710 }
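
/* Illustrative sketch (not part of the original file): the reference taken by
 * hci_dev_get() must be dropped with hci_dev_put() once the caller is done,
 * mirroring the pattern used by the ioctl helpers below. The index value and
 * function name are assumptions for illustration.
 */
#if 0
static void example_use_hdev(void)
{
	struct hci_dev *hdev = hci_dev_get(0);

	if (!hdev)
		return;

	BT_DBG("%s flags 0x%lx", hdev->name, hdev->flags);

	hci_dev_put(hdev);
}
#endif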
711 
712 /* ---- Inquiry support ---- */
713 
714 bool hci_discovery_active(struct hci_dev *hdev)
715 {
716 	struct discovery_state *discov = &hdev->discovery;
717 
718 	switch (discov->state) {
719 	case DISCOVERY_FINDING:
720 	case DISCOVERY_RESOLVING:
721 		return true;
722 
723 	default:
724 		return false;
725 	}
726 }
727 
728 void hci_discovery_set_state(struct hci_dev *hdev, int state)
729 {
730 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
731 
732 	if (hdev->discovery.state == state)
733 		return;
734 
735 	switch (state) {
736 	case DISCOVERY_STOPPED:
737 		if (hdev->discovery.state != DISCOVERY_STARTING)
738 			mgmt_discovering(hdev, 0);
739 		break;
740 	case DISCOVERY_STARTING:
741 		break;
742 	case DISCOVERY_FINDING:
743 		mgmt_discovering(hdev, 1);
744 		break;
745 	case DISCOVERY_RESOLVING:
746 		break;
747 	case DISCOVERY_STOPPING:
748 		break;
749 	}
750 
751 	hdev->discovery.state = state;
752 }
753 
754 static void inquiry_cache_flush(struct hci_dev *hdev)
755 {
756 	struct discovery_state *cache = &hdev->discovery;
757 	struct inquiry_entry *p, *n;
758 
759 	list_for_each_entry_safe(p, n, &cache->all, all) {
760 		list_del(&p->all);
761 		kfree(p);
762 	}
763 
764 	INIT_LIST_HEAD(&cache->unknown);
765 	INIT_LIST_HEAD(&cache->resolve);
766 }
767 
768 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
769 					       bdaddr_t *bdaddr)
770 {
771 	struct discovery_state *cache = &hdev->discovery;
772 	struct inquiry_entry *e;
773 
774 	BT_DBG("cache %p, %pMR", cache, bdaddr);
775 
776 	list_for_each_entry(e, &cache->all, all) {
777 		if (!bacmp(&e->data.bdaddr, bdaddr))
778 			return e;
779 	}
780 
781 	return NULL;
782 }
783 
784 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
785 						       bdaddr_t *bdaddr)
786 {
787 	struct discovery_state *cache = &hdev->discovery;
788 	struct inquiry_entry *e;
789 
790 	BT_DBG("cache %p, %pMR", cache, bdaddr);
791 
792 	list_for_each_entry(e, &cache->unknown, list) {
793 		if (!bacmp(&e->data.bdaddr, bdaddr))
794 			return e;
795 	}
796 
797 	return NULL;
798 }
799 
800 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
801 						       bdaddr_t *bdaddr,
802 						       int state)
803 {
804 	struct discovery_state *cache = &hdev->discovery;
805 	struct inquiry_entry *e;
806 
807 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
808 
809 	list_for_each_entry(e, &cache->resolve, list) {
810 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
811 			return e;
812 		if (!bacmp(&e->data.bdaddr, bdaddr))
813 			return e;
814 	}
815 
816 	return NULL;
817 }
818 
819 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
820 				      struct inquiry_entry *ie)
821 {
822 	struct discovery_state *cache = &hdev->discovery;
823 	struct list_head *pos = &cache->resolve;
824 	struct inquiry_entry *p;
825 
826 	list_del(&ie->list);
827 
828 	list_for_each_entry(p, &cache->resolve, list) {
829 		if (p->name_state != NAME_PENDING &&
830 		    abs(p->data.rssi) >= abs(ie->data.rssi))
831 			break;
832 		pos = &p->list;
833 	}
834 
835 	list_add(&ie->list, pos);
836 }
837 
838 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
839 			      bool name_known, bool *ssp)
840 {
841 	struct discovery_state *cache = &hdev->discovery;
842 	struct inquiry_entry *ie;
843 
844 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
845 
846 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
847 
848 	if (ssp)
849 		*ssp = data->ssp_mode;
850 
851 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
852 	if (ie) {
853 		if (ie->data.ssp_mode && ssp)
854 			*ssp = true;
855 
856 		if (ie->name_state == NAME_NEEDED &&
857 		    data->rssi != ie->data.rssi) {
858 			ie->data.rssi = data->rssi;
859 			hci_inquiry_cache_update_resolve(hdev, ie);
860 		}
861 
862 		goto update;
863 	}
864 
865 	/* Entry not in the cache. Add new one. */
866 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
867 	if (!ie)
868 		return false;
869 
870 	list_add(&ie->all, &cache->all);
871 
872 	if (name_known) {
873 		ie->name_state = NAME_KNOWN;
874 	} else {
875 		ie->name_state = NAME_NOT_KNOWN;
876 		list_add(&ie->list, &cache->unknown);
877 	}
878 
879 update:
880 	if (name_known && ie->name_state != NAME_KNOWN &&
881 	    ie->name_state != NAME_PENDING) {
882 		ie->name_state = NAME_KNOWN;
883 		list_del(&ie->list);
884 	}
885 
886 	memcpy(&ie->data, data, sizeof(*data));
887 	ie->timestamp = jiffies;
888 	cache->timestamp = jiffies;
889 
890 	if (ie->name_state == NAME_NOT_KNOWN)
891 		return false;
892 
893 	return true;
894 }
895 
896 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
897 {
898 	struct discovery_state *cache = &hdev->discovery;
899 	struct inquiry_info *info = (struct inquiry_info *) buf;
900 	struct inquiry_entry *e;
901 	int copied = 0;
902 
903 	list_for_each_entry(e, &cache->all, all) {
904 		struct inquiry_data *data = &e->data;
905 
906 		if (copied >= num)
907 			break;
908 
909 		bacpy(&info->bdaddr, &data->bdaddr);
910 		info->pscan_rep_mode	= data->pscan_rep_mode;
911 		info->pscan_period_mode	= data->pscan_period_mode;
912 		info->pscan_mode	= data->pscan_mode;
913 		memcpy(info->dev_class, data->dev_class, 3);
914 		info->clock_offset	= data->clock_offset;
915 
916 		info++;
917 		copied++;
918 	}
919 
920 	BT_DBG("cache %p, copied %d", cache, copied);
921 	return copied;
922 }
923 
924 static void hci_inq_req(struct hci_request *req, unsigned long opt)
925 {
926 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
927 	struct hci_dev *hdev = req->hdev;
928 	struct hci_cp_inquiry cp;
929 
930 	BT_DBG("%s", hdev->name);
931 
932 	if (test_bit(HCI_INQUIRY, &hdev->flags))
933 		return;
934 
935 	/* Start Inquiry */
936 	memcpy(&cp.lap, &ir->lap, 3);
937 	cp.length  = ir->length;
938 	cp.num_rsp = ir->num_rsp;
939 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
940 }
941 
942 static int wait_inquiry(void *word)
943 {
944 	schedule();
945 	return signal_pending(current);
946 }
947 
948 int hci_inquiry(void __user *arg)
949 {
950 	__u8 __user *ptr = arg;
951 	struct hci_inquiry_req ir;
952 	struct hci_dev *hdev;
953 	int err = 0, do_inquiry = 0, max_rsp;
954 	long timeo;
955 	__u8 *buf;
956 
957 	if (copy_from_user(&ir, ptr, sizeof(ir)))
958 		return -EFAULT;
959 
960 	hdev = hci_dev_get(ir.dev_id);
961 	if (!hdev)
962 		return -ENODEV;
963 
964 	hci_dev_lock(hdev);
965 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
966 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
967 		inquiry_cache_flush(hdev);
968 		do_inquiry = 1;
969 	}
970 	hci_dev_unlock(hdev);
971 
972 	timeo = ir.length * msecs_to_jiffies(2000);
973 
974 	if (do_inquiry) {
975 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
976 				   timeo);
977 		if (err < 0)
978 			goto done;
979 
980 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
981 		 * cleared). If it is interrupted by a signal, return -EINTR.
982 		 */
983 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
984 				TASK_INTERRUPTIBLE))
985 			return -EINTR;
986 	}
987 
988 	/* for unlimited number of responses we will use buffer with
989 	 * 255 entries
990 	 */
991 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
992 
993 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
994 	 * copy it to the user space.
995 	 */
996 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
997 	if (!buf) {
998 		err = -ENOMEM;
999 		goto done;
1000 	}
1001 
1002 	hci_dev_lock(hdev);
1003 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1004 	hci_dev_unlock(hdev);
1005 
1006 	BT_DBG("num_rsp %d", ir.num_rsp);
1007 
1008 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1009 		ptr += sizeof(ir);
1010 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1011 				 ir.num_rsp))
1012 			err = -EFAULT;
1013 	} else
1014 		err = -EFAULT;
1015 
1016 	kfree(buf);
1017 
1018 done:
1019 	hci_dev_put(hdev);
1020 	return err;
1021 }
1022 
1023 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1024 {
1025 	u8 ad_len = 0, flags = 0;
1026 	size_t name_len;
1027 
1028 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1029 		flags |= LE_AD_GENERAL;
1030 
1031 	if (!lmp_bredr_capable(hdev))
1032 		flags |= LE_AD_NO_BREDR;
1033 
1034 	if (lmp_le_br_capable(hdev))
1035 		flags |= LE_AD_SIM_LE_BREDR_CTRL;
1036 
1037 	if (lmp_host_le_br_capable(hdev))
1038 		flags |= LE_AD_SIM_LE_BREDR_HOST;
1039 
1040 	if (flags) {
1041 		BT_DBG("adv flags 0x%02x", flags);
1042 
1043 		ptr[0] = 2;
1044 		ptr[1] = EIR_FLAGS;
1045 		ptr[2] = flags;
1046 
1047 		ad_len += 3;
1048 		ptr += 3;
1049 	}
1050 
1051 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1052 		ptr[0] = 2;
1053 		ptr[1] = EIR_TX_POWER;
1054 		ptr[2] = (u8) hdev->adv_tx_power;
1055 
1056 		ad_len += 3;
1057 		ptr += 3;
1058 	}
1059 
1060 	name_len = strlen(hdev->dev_name);
1061 	if (name_len > 0) {
1062 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1063 
1064 		if (name_len > max_len) {
1065 			name_len = max_len;
1066 			ptr[1] = EIR_NAME_SHORT;
1067 		} else
1068 			ptr[1] = EIR_NAME_COMPLETE;
1069 
1070 		ptr[0] = name_len + 1;
1071 
1072 		memcpy(ptr + 2, hdev->dev_name, name_len);
1073 
1074 		ad_len += (name_len + 2);
1075 		ptr += (name_len + 2);
1076 	}
1077 
1078 	return ad_len;
1079 }
1080 
1081 void hci_update_ad(struct hci_request *req)
1082 {
1083 	struct hci_dev *hdev = req->hdev;
1084 	struct hci_cp_le_set_adv_data cp;
1085 	u8 len;
1086 
1087 	if (!lmp_le_capable(hdev))
1088 		return;
1089 
1090 	memset(&cp, 0, sizeof(cp));
1091 
1092 	len = create_ad(hdev, cp.data);
1093 
1094 	if (hdev->adv_data_len == len &&
1095 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1096 		return;
1097 
1098 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1099 	hdev->adv_data_len = len;
1100 
1101 	cp.length = len;
1102 
1103 	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1104 }
1105 
1106 /* ---- HCI ioctl helpers ---- */
1107 
1108 int hci_dev_open(__u16 dev)
1109 {
1110 	struct hci_dev *hdev;
1111 	int ret = 0;
1112 
1113 	hdev = hci_dev_get(dev);
1114 	if (!hdev)
1115 		return -ENODEV;
1116 
1117 	BT_DBG("%s %p", hdev->name, hdev);
1118 
1119 	hci_req_lock(hdev);
1120 
1121 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1122 		ret = -ENODEV;
1123 		goto done;
1124 	}
1125 
1126 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1127 		ret = -ERFKILL;
1128 		goto done;
1129 	}
1130 
1131 	if (test_bit(HCI_UP, &hdev->flags)) {
1132 		ret = -EALREADY;
1133 		goto done;
1134 	}
1135 
1136 	if (hdev->open(hdev)) {
1137 		ret = -EIO;
1138 		goto done;
1139 	}
1140 
1141 	atomic_set(&hdev->cmd_cnt, 1);
1142 	set_bit(HCI_INIT, &hdev->flags);
1143 
1144 	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1145 		ret = hdev->setup(hdev);
1146 
1147 	if (!ret) {
1148 		/* Treat all non BR/EDR controllers as raw devices if
1149 		 * enable_hs is not set.
1150 		 */
1151 		if (hdev->dev_type != HCI_BREDR && !enable_hs)
1152 			set_bit(HCI_RAW, &hdev->flags);
1153 
1154 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1155 			set_bit(HCI_RAW, &hdev->flags);
1156 
1157 		if (!test_bit(HCI_RAW, &hdev->flags))
1158 			ret = __hci_init(hdev);
1159 	}
1160 
1161 	clear_bit(HCI_INIT, &hdev->flags);
1162 
1163 	if (!ret) {
1164 		hci_dev_hold(hdev);
1165 		set_bit(HCI_UP, &hdev->flags);
1166 		hci_notify(hdev, HCI_DEV_UP);
1167 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1168 		    mgmt_valid_hdev(hdev)) {
1169 			hci_dev_lock(hdev);
1170 			mgmt_powered(hdev, 1);
1171 			hci_dev_unlock(hdev);
1172 		}
1173 	} else {
1174 		/* Init failed, cleanup */
1175 		flush_work(&hdev->tx_work);
1176 		flush_work(&hdev->cmd_work);
1177 		flush_work(&hdev->rx_work);
1178 
1179 		skb_queue_purge(&hdev->cmd_q);
1180 		skb_queue_purge(&hdev->rx_q);
1181 
1182 		if (hdev->flush)
1183 			hdev->flush(hdev);
1184 
1185 		if (hdev->sent_cmd) {
1186 			kfree_skb(hdev->sent_cmd);
1187 			hdev->sent_cmd = NULL;
1188 		}
1189 
1190 		hdev->close(hdev);
1191 		hdev->flags = 0;
1192 	}
1193 
1194 done:
1195 	hci_req_unlock(hdev);
1196 	hci_dev_put(hdev);
1197 	return ret;
1198 }
1199 
1200 static int hci_dev_do_close(struct hci_dev *hdev)
1201 {
1202 	BT_DBG("%s %p", hdev->name, hdev);
1203 
1204 	cancel_work_sync(&hdev->le_scan);
1205 
1206 	cancel_delayed_work(&hdev->power_off);
1207 
1208 	hci_req_cancel(hdev, ENODEV);
1209 	hci_req_lock(hdev);
1210 
1211 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1212 		del_timer_sync(&hdev->cmd_timer);
1213 		hci_req_unlock(hdev);
1214 		return 0;
1215 	}
1216 
1217 	/* Flush RX and TX works */
1218 	flush_work(&hdev->tx_work);
1219 	flush_work(&hdev->rx_work);
1220 
1221 	if (hdev->discov_timeout > 0) {
1222 		cancel_delayed_work(&hdev->discov_off);
1223 		hdev->discov_timeout = 0;
1224 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1225 	}
1226 
1227 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1228 		cancel_delayed_work(&hdev->service_cache);
1229 
1230 	cancel_delayed_work_sync(&hdev->le_scan_disable);
1231 
1232 	hci_dev_lock(hdev);
1233 	inquiry_cache_flush(hdev);
1234 	hci_conn_hash_flush(hdev);
1235 	hci_dev_unlock(hdev);
1236 
1237 	hci_notify(hdev, HCI_DEV_DOWN);
1238 
1239 	if (hdev->flush)
1240 		hdev->flush(hdev);
1241 
1242 	/* Reset device */
1243 	skb_queue_purge(&hdev->cmd_q);
1244 	atomic_set(&hdev->cmd_cnt, 1);
1245 	if (!test_bit(HCI_RAW, &hdev->flags) &&
1246 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1247 		set_bit(HCI_INIT, &hdev->flags);
1248 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1249 		clear_bit(HCI_INIT, &hdev->flags);
1250 	}
1251 
1252 	/* flush cmd  work */
1253 	flush_work(&hdev->cmd_work);
1254 
1255 	/* Drop queues */
1256 	skb_queue_purge(&hdev->rx_q);
1257 	skb_queue_purge(&hdev->cmd_q);
1258 	skb_queue_purge(&hdev->raw_q);
1259 
1260 	/* Drop last sent command */
1261 	if (hdev->sent_cmd) {
1262 		del_timer_sync(&hdev->cmd_timer);
1263 		kfree_skb(hdev->sent_cmd);
1264 		hdev->sent_cmd = NULL;
1265 	}
1266 
1267 	kfree_skb(hdev->recv_evt);
1268 	hdev->recv_evt = NULL;
1269 
1270 	/* After this point our queues are empty
1271 	 * and no tasks are scheduled. */
1272 	hdev->close(hdev);
1273 
1274 	/* Clear flags */
1275 	hdev->flags = 0;
1276 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1277 
1278 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1279 	    mgmt_valid_hdev(hdev)) {
1280 		hci_dev_lock(hdev);
1281 		mgmt_powered(hdev, 0);
1282 		hci_dev_unlock(hdev);
1283 	}
1284 
1285 	/* Controller radio is available but is currently powered down */
1286 	hdev->amp_status = 0;
1287 
1288 	memset(hdev->eir, 0, sizeof(hdev->eir));
1289 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1290 
1291 	hci_req_unlock(hdev);
1292 
1293 	hci_dev_put(hdev);
1294 	return 0;
1295 }
1296 
1297 int hci_dev_close(__u16 dev)
1298 {
1299 	struct hci_dev *hdev;
1300 	int err;
1301 
1302 	hdev = hci_dev_get(dev);
1303 	if (!hdev)
1304 		return -ENODEV;
1305 
1306 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1307 		cancel_delayed_work(&hdev->power_off);
1308 
1309 	err = hci_dev_do_close(hdev);
1310 
1311 	hci_dev_put(hdev);
1312 	return err;
1313 }
1314 
1315 int hci_dev_reset(__u16 dev)
1316 {
1317 	struct hci_dev *hdev;
1318 	int ret = 0;
1319 
1320 	hdev = hci_dev_get(dev);
1321 	if (!hdev)
1322 		return -ENODEV;
1323 
1324 	hci_req_lock(hdev);
1325 
1326 	if (!test_bit(HCI_UP, &hdev->flags))
1327 		goto done;
1328 
1329 	/* Drop queues */
1330 	skb_queue_purge(&hdev->rx_q);
1331 	skb_queue_purge(&hdev->cmd_q);
1332 
1333 	hci_dev_lock(hdev);
1334 	inquiry_cache_flush(hdev);
1335 	hci_conn_hash_flush(hdev);
1336 	hci_dev_unlock(hdev);
1337 
1338 	if (hdev->flush)
1339 		hdev->flush(hdev);
1340 
1341 	atomic_set(&hdev->cmd_cnt, 1);
1342 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1343 
1344 	if (!test_bit(HCI_RAW, &hdev->flags))
1345 		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1346 
1347 done:
1348 	hci_req_unlock(hdev);
1349 	hci_dev_put(hdev);
1350 	return ret;
1351 }
1352 
1353 int hci_dev_reset_stat(__u16 dev)
1354 {
1355 	struct hci_dev *hdev;
1356 	int ret = 0;
1357 
1358 	hdev = hci_dev_get(dev);
1359 	if (!hdev)
1360 		return -ENODEV;
1361 
1362 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1363 
1364 	hci_dev_put(hdev);
1365 
1366 	return ret;
1367 }
1368 
1369 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1370 {
1371 	struct hci_dev *hdev;
1372 	struct hci_dev_req dr;
1373 	int err = 0;
1374 
1375 	if (copy_from_user(&dr, arg, sizeof(dr)))
1376 		return -EFAULT;
1377 
1378 	hdev = hci_dev_get(dr.dev_id);
1379 	if (!hdev)
1380 		return -ENODEV;
1381 
1382 	switch (cmd) {
1383 	case HCISETAUTH:
1384 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1385 				   HCI_INIT_TIMEOUT);
1386 		break;
1387 
1388 	case HCISETENCRYPT:
1389 		if (!lmp_encrypt_capable(hdev)) {
1390 			err = -EOPNOTSUPP;
1391 			break;
1392 		}
1393 
1394 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1395 			/* Auth must be enabled first */
1396 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1397 					   HCI_INIT_TIMEOUT);
1398 			if (err)
1399 				break;
1400 		}
1401 
1402 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1403 				   HCI_INIT_TIMEOUT);
1404 		break;
1405 
1406 	case HCISETSCAN:
1407 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1408 				   HCI_INIT_TIMEOUT);
1409 		break;
1410 
1411 	case HCISETLINKPOL:
1412 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1413 				   HCI_INIT_TIMEOUT);
1414 		break;
1415 
1416 	case HCISETLINKMODE:
1417 		hdev->link_mode = ((__u16) dr.dev_opt) &
1418 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
1419 		break;
1420 
1421 	case HCISETPTYPE:
1422 		hdev->pkt_type = (__u16) dr.dev_opt;
1423 		break;
1424 
1425 	case HCISETACLMTU:
1426 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1427 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1428 		break;
1429 
1430 	case HCISETSCOMTU:
1431 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1432 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1433 		break;
1434 
1435 	default:
1436 		err = -EINVAL;
1437 		break;
1438 	}
1439 
1440 	hci_dev_put(hdev);
1441 	return err;
1442 }
1443 
1444 int hci_get_dev_list(void __user *arg)
1445 {
1446 	struct hci_dev *hdev;
1447 	struct hci_dev_list_req *dl;
1448 	struct hci_dev_req *dr;
1449 	int n = 0, size, err;
1450 	__u16 dev_num;
1451 
1452 	if (get_user(dev_num, (__u16 __user *) arg))
1453 		return -EFAULT;
1454 
1455 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1456 		return -EINVAL;
1457 
1458 	size = sizeof(*dl) + dev_num * sizeof(*dr);
1459 
1460 	dl = kzalloc(size, GFP_KERNEL);
1461 	if (!dl)
1462 		return -ENOMEM;
1463 
1464 	dr = dl->dev_req;
1465 
1466 	read_lock(&hci_dev_list_lock);
1467 	list_for_each_entry(hdev, &hci_dev_list, list) {
1468 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1469 			cancel_delayed_work(&hdev->power_off);
1470 
1471 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1472 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1473 
1474 		(dr + n)->dev_id  = hdev->id;
1475 		(dr + n)->dev_opt = hdev->flags;
1476 
1477 		if (++n >= dev_num)
1478 			break;
1479 	}
1480 	read_unlock(&hci_dev_list_lock);
1481 
1482 	dl->dev_num = n;
1483 	size = sizeof(*dl) + n * sizeof(*dr);
1484 
1485 	err = copy_to_user(arg, dl, size);
1486 	kfree(dl);
1487 
1488 	return err ? -EFAULT : 0;
1489 }
1490 
1491 int hci_get_dev_info(void __user *arg)
1492 {
1493 	struct hci_dev *hdev;
1494 	struct hci_dev_info di;
1495 	int err = 0;
1496 
1497 	if (copy_from_user(&di, arg, sizeof(di)))
1498 		return -EFAULT;
1499 
1500 	hdev = hci_dev_get(di.dev_id);
1501 	if (!hdev)
1502 		return -ENODEV;
1503 
1504 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1505 		cancel_delayed_work_sync(&hdev->power_off);
1506 
1507 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1508 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1509 
1510 	strcpy(di.name, hdev->name);
1511 	di.bdaddr   = hdev->bdaddr;
1512 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1513 	di.flags    = hdev->flags;
1514 	di.pkt_type = hdev->pkt_type;
1515 	if (lmp_bredr_capable(hdev)) {
1516 		di.acl_mtu  = hdev->acl_mtu;
1517 		di.acl_pkts = hdev->acl_pkts;
1518 		di.sco_mtu  = hdev->sco_mtu;
1519 		di.sco_pkts = hdev->sco_pkts;
1520 	} else {
1521 		di.acl_mtu  = hdev->le_mtu;
1522 		di.acl_pkts = hdev->le_pkts;
1523 		di.sco_mtu  = 0;
1524 		di.sco_pkts = 0;
1525 	}
1526 	di.link_policy = hdev->link_policy;
1527 	di.link_mode   = hdev->link_mode;
1528 
1529 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1530 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1531 
1532 	if (copy_to_user(arg, &di, sizeof(di)))
1533 		err = -EFAULT;
1534 
1535 	hci_dev_put(hdev);
1536 
1537 	return err;
1538 }
1539 
1540 /* ---- Interface to HCI drivers ---- */
1541 
1542 static int hci_rfkill_set_block(void *data, bool blocked)
1543 {
1544 	struct hci_dev *hdev = data;
1545 
1546 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1547 
1548 	if (!blocked)
1549 		return 0;
1550 
1551 	hci_dev_do_close(hdev);
1552 
1553 	return 0;
1554 }
1555 
1556 static const struct rfkill_ops hci_rfkill_ops = {
1557 	.set_block = hci_rfkill_set_block,
1558 };
1559 
1560 static void hci_power_on(struct work_struct *work)
1561 {
1562 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1563 	int err;
1564 
1565 	BT_DBG("%s", hdev->name);
1566 
1567 	err = hci_dev_open(hdev->id);
1568 	if (err < 0) {
1569 		mgmt_set_powered_failed(hdev, err);
1570 		return;
1571 	}
1572 
1573 	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1574 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1575 				   HCI_AUTO_OFF_TIMEOUT);
1576 
1577 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1578 		mgmt_index_added(hdev);
1579 }
1580 
1581 static void hci_power_off(struct work_struct *work)
1582 {
1583 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1584 					    power_off.work);
1585 
1586 	BT_DBG("%s", hdev->name);
1587 
1588 	hci_dev_do_close(hdev);
1589 }
1590 
1591 static void hci_discov_off(struct work_struct *work)
1592 {
1593 	struct hci_dev *hdev;
1594 	u8 scan = SCAN_PAGE;
1595 
1596 	hdev = container_of(work, struct hci_dev, discov_off.work);
1597 
1598 	BT_DBG("%s", hdev->name);
1599 
1600 	hci_dev_lock(hdev);
1601 
1602 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1603 
1604 	hdev->discov_timeout = 0;
1605 
1606 	hci_dev_unlock(hdev);
1607 }
1608 
1609 int hci_uuids_clear(struct hci_dev *hdev)
1610 {
1611 	struct bt_uuid *uuid, *tmp;
1612 
1613 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1614 		list_del(&uuid->list);
1615 		kfree(uuid);
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 int hci_link_keys_clear(struct hci_dev *hdev)
1622 {
1623 	struct list_head *p, *n;
1624 
1625 	list_for_each_safe(p, n, &hdev->link_keys) {
1626 		struct link_key *key;
1627 
1628 		key = list_entry(p, struct link_key, list);
1629 
1630 		list_del(p);
1631 		kfree(key);
1632 	}
1633 
1634 	return 0;
1635 }
1636 
1637 int hci_smp_ltks_clear(struct hci_dev *hdev)
1638 {
1639 	struct smp_ltk *k, *tmp;
1640 
1641 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1642 		list_del(&k->list);
1643 		kfree(k);
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1650 {
1651 	struct link_key *k;
1652 
1653 	list_for_each_entry(k, &hdev->link_keys, list)
1654 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1655 			return k;
1656 
1657 	return NULL;
1658 }
1659 
1660 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1661 			       u8 key_type, u8 old_key_type)
1662 {
1663 	/* Legacy key */
1664 	if (key_type < 0x03)
1665 		return true;
1666 
1667 	/* Debug keys are insecure so don't store them persistently */
1668 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1669 		return false;
1670 
1671 	/* Changed combination key and there's no previous one */
1672 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1673 		return false;
1674 
1675 	/* Security mode 3 case */
1676 	if (!conn)
1677 		return true;
1678 
1679 	/* Neither local nor remote side had no-bonding as requirement */
1680 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1681 		return true;
1682 
1683 	/* Local side had dedicated bonding as requirement */
1684 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1685 		return true;
1686 
1687 	/* Remote side had dedicated bonding as requirement */
1688 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1689 		return true;
1690 
1691 	/* If none of the above criteria match, then don't store the key
1692 	 * persistently */
1693 	return false;
1694 }
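
/* Summary (added, not in the original file) of the decision above:
 *
 *   key_type < 0x03 (legacy combination/local/remote unit key)  -> store
 *   HCI_LK_DEBUG_COMBINATION                                    -> don't store
 *   HCI_LK_CHANGED_COMBINATION with no previous key             -> don't store
 *   no connection (security mode 3 link key)                    -> store
 *   both sides requested some form of bonding                   -> store
 *   either side required dedicated bonding                      -> store
 *   otherwise                                                   -> don't store
 */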
1695 
1696 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1697 {
1698 	struct smp_ltk *k;
1699 
1700 	list_for_each_entry(k, &hdev->long_term_keys, list) {
1701 		if (k->ediv != ediv ||
1702 		    memcmp(rand, k->rand, sizeof(k->rand)))
1703 			continue;
1704 
1705 		return k;
1706 	}
1707 
1708 	return NULL;
1709 }
1710 
1711 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1712 				     u8 addr_type)
1713 {
1714 	struct smp_ltk *k;
1715 
1716 	list_for_each_entry(k, &hdev->long_term_keys, list)
1717 		if (addr_type == k->bdaddr_type &&
1718 		    bacmp(bdaddr, &k->bdaddr) == 0)
1719 			return k;
1720 
1721 	return NULL;
1722 }
1723 
1724 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1725 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1726 {
1727 	struct link_key *key, *old_key;
1728 	u8 old_key_type;
1729 	bool persistent;
1730 
1731 	old_key = hci_find_link_key(hdev, bdaddr);
1732 	if (old_key) {
1733 		old_key_type = old_key->type;
1734 		key = old_key;
1735 	} else {
1736 		old_key_type = conn ? conn->key_type : 0xff;
1737 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1738 		if (!key)
1739 			return -ENOMEM;
1740 		list_add(&key->list, &hdev->link_keys);
1741 	}
1742 
1743 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1744 
1745 	/* Some buggy controller combinations generate a changed
1746 	 * combination key for legacy pairing even when there's no
1747 	 * previous key */
1748 	if (type == HCI_LK_CHANGED_COMBINATION &&
1749 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1750 		type = HCI_LK_COMBINATION;
1751 		if (conn)
1752 			conn->key_type = type;
1753 	}
1754 
1755 	bacpy(&key->bdaddr, bdaddr);
1756 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1757 	key->pin_len = pin_len;
1758 
1759 	if (type == HCI_LK_CHANGED_COMBINATION)
1760 		key->type = old_key_type;
1761 	else
1762 		key->type = type;
1763 
1764 	if (!new_key)
1765 		return 0;
1766 
1767 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1768 
1769 	mgmt_new_link_key(hdev, key, persistent);
1770 
1771 	if (conn)
1772 		conn->flush_key = !persistent;
1773 
1774 	return 0;
1775 }
1776 
1777 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1778 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1779 		ediv, u8 rand[8])
1780 {
1781 	struct smp_ltk *key, *old_key;
1782 
1783 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1784 		return 0;
1785 
1786 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1787 	if (old_key)
1788 		key = old_key;
1789 	else {
1790 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1791 		if (!key)
1792 			return -ENOMEM;
1793 		list_add(&key->list, &hdev->long_term_keys);
1794 	}
1795 
1796 	bacpy(&key->bdaddr, bdaddr);
1797 	key->bdaddr_type = addr_type;
1798 	memcpy(key->val, tk, sizeof(key->val));
1799 	key->authenticated = authenticated;
1800 	key->ediv = ediv;
1801 	key->enc_size = enc_size;
1802 	key->type = type;
1803 	memcpy(key->rand, rand, sizeof(key->rand));
1804 
1805 	if (!new_key)
1806 		return 0;
1807 
1808 	if (type & HCI_SMP_LTK)
1809 		mgmt_new_ltk(hdev, key, 1);
1810 
1811 	return 0;
1812 }
1813 
1814 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1815 {
1816 	struct link_key *key;
1817 
1818 	key = hci_find_link_key(hdev, bdaddr);
1819 	if (!key)
1820 		return -ENOENT;
1821 
1822 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1823 
1824 	list_del(&key->list);
1825 	kfree(key);
1826 
1827 	return 0;
1828 }
1829 
1830 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1831 {
1832 	struct smp_ltk *k, *tmp;
1833 
1834 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1835 		if (bacmp(bdaddr, &k->bdaddr))
1836 			continue;
1837 
1838 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1839 
1840 		list_del(&k->list);
1841 		kfree(k);
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 /* HCI command timer function */
1848 static void hci_cmd_timeout(unsigned long arg)
1849 {
1850 	struct hci_dev *hdev = (void *) arg;
1851 
1852 	if (hdev->sent_cmd) {
1853 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1854 		u16 opcode = __le16_to_cpu(sent->opcode);
1855 
1856 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1857 	} else {
1858 		BT_ERR("%s command tx timeout", hdev->name);
1859 	}
1860 
1861 	atomic_set(&hdev->cmd_cnt, 1);
1862 	queue_work(hdev->workqueue, &hdev->cmd_work);
1863 }
1864 
1865 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1866 					  bdaddr_t *bdaddr)
1867 {
1868 	struct oob_data *data;
1869 
1870 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1871 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1872 			return data;
1873 
1874 	return NULL;
1875 }
1876 
1877 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1878 {
1879 	struct oob_data *data;
1880 
1881 	data = hci_find_remote_oob_data(hdev, bdaddr);
1882 	if (!data)
1883 		return -ENOENT;
1884 
1885 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1886 
1887 	list_del(&data->list);
1888 	kfree(data);
1889 
1890 	return 0;
1891 }
1892 
1893 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1894 {
1895 	struct oob_data *data, *n;
1896 
1897 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1898 		list_del(&data->list);
1899 		kfree(data);
1900 	}
1901 
1902 	return 0;
1903 }
1904 
1905 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1906 			    u8 *randomizer)
1907 {
1908 	struct oob_data *data;
1909 
1910 	data = hci_find_remote_oob_data(hdev, bdaddr);
1911 
1912 	if (!data) {
1913 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1914 		if (!data)
1915 			return -ENOMEM;
1916 
1917 		bacpy(&data->bdaddr, bdaddr);
1918 		list_add(&data->list, &hdev->remote_oob_data);
1919 	}
1920 
1921 	memcpy(data->hash, hash, sizeof(data->hash));
1922 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1923 
1924 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1925 
1926 	return 0;
1927 }
1928 
1929 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1930 {
1931 	struct bdaddr_list *b;
1932 
1933 	list_for_each_entry(b, &hdev->blacklist, list)
1934 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1935 			return b;
1936 
1937 	return NULL;
1938 }
1939 
1940 int hci_blacklist_clear(struct hci_dev *hdev)
1941 {
1942 	struct list_head *p, *n;
1943 
1944 	list_for_each_safe(p, n, &hdev->blacklist) {
1945 		struct bdaddr_list *b;
1946 
1947 		b = list_entry(p, struct bdaddr_list, list);
1948 
1949 		list_del(p);
1950 		kfree(b);
1951 	}
1952 
1953 	return 0;
1954 }
1955 
1956 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1957 {
1958 	struct bdaddr_list *entry;
1959 
1960 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1961 		return -EBADF;
1962 
1963 	if (hci_blacklist_lookup(hdev, bdaddr))
1964 		return -EEXIST;
1965 
1966 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1967 	if (!entry)
1968 		return -ENOMEM;
1969 
1970 	bacpy(&entry->bdaddr, bdaddr);
1971 
1972 	list_add(&entry->list, &hdev->blacklist);
1973 
1974 	return mgmt_device_blocked(hdev, bdaddr, type);
1975 }
1976 
1977 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1978 {
1979 	struct bdaddr_list *entry;
1980 
1981 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982 		return hci_blacklist_clear(hdev);
1983 
1984 	entry = hci_blacklist_lookup(hdev, bdaddr);
1985 	if (!entry)
1986 		return -ENOENT;
1987 
1988 	list_del(&entry->list);
1989 	kfree(entry);
1990 
1991 	return mgmt_device_unblocked(hdev, bdaddr, type);
1992 }
1993 
1994 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1995 {
1996 	struct le_scan_params *param =  (struct le_scan_params *) opt;
1997 	struct hci_cp_le_set_scan_param cp;
1998 
1999 	memset(&cp, 0, sizeof(cp));
2000 	cp.type = param->type;
2001 	cp.interval = cpu_to_le16(param->interval);
2002 	cp.window = cpu_to_le16(param->window);
2003 
2004 	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2005 }
2006 
2007 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2008 {
2009 	struct hci_cp_le_set_scan_enable cp;
2010 
2011 	memset(&cp, 0, sizeof(cp));
2012 	cp.enable = LE_SCAN_ENABLE;
2013 	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2014 
2015 	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2016 }
2017 
2018 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2019 			  u16 window, int timeout)
2020 {
2021 	long timeo = msecs_to_jiffies(3000);
2022 	struct le_scan_params param;
2023 	int err;
2024 
2025 	BT_DBG("%s", hdev->name);
2026 
2027 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2028 		return -EINPROGRESS;
2029 
2030 	param.type = type;
2031 	param.interval = interval;
2032 	param.window = window;
2033 
2034 	hci_req_lock(hdev);
2035 
2036 	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2037 			     timeo);
2038 	if (!err)
2039 		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2040 
2041 	hci_req_unlock(hdev);
2042 
2043 	if (err < 0)
2044 		return err;
2045 
2046 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2047 			   timeout);
2048 
2049 	return 0;
2050 }
2051 
2052 int hci_cancel_le_scan(struct hci_dev *hdev)
2053 {
2054 	BT_DBG("%s", hdev->name);
2055 
2056 	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2057 		return -EALREADY;
2058 
2059 	if (cancel_delayed_work(&hdev->le_scan_disable)) {
2060 		struct hci_cp_le_set_scan_enable cp;
2061 
2062 		/* Send HCI command to disable LE Scan */
2063 		memset(&cp, 0, sizeof(cp));
2064 		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2065 	}
2066 
2067 	return 0;
2068 }
2069 
2070 static void le_scan_disable_work(struct work_struct *work)
2071 {
2072 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2073 					    le_scan_disable.work);
2074 	struct hci_cp_le_set_scan_enable cp;
2075 
2076 	BT_DBG("%s", hdev->name);
2077 
2078 	memset(&cp, 0, sizeof(cp));
2079 
2080 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2081 }
2082 
2083 static void le_scan_work(struct work_struct *work)
2084 {
2085 	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2086 	struct le_scan_params *param = &hdev->le_scan_params;
2087 
2088 	BT_DBG("%s", hdev->name);
2089 
2090 	hci_do_le_scan(hdev, param->type, param->interval, param->window,
2091 		       param->timeout);
2092 }
2093 
2094 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2095 		int timeout)
2096 {
2097 	struct le_scan_params *param = &hdev->le_scan_params;
2098 
2099 	BT_DBG("%s", hdev->name);
2100 
2101 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2102 		return -ENOTSUPP;
2103 
2104 	if (work_busy(&hdev->le_scan))
2105 		return -EINPROGRESS;
2106 
2107 	param->type = type;
2108 	param->interval = interval;
2109 	param->window = window;
2110 	param->timeout = timeout;
2111 
2112 	queue_work(system_long_wq, &hdev->le_scan);
2113 
2114 	return 0;
2115 }
2116 
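/* Example (illustrative sketch, not part of the original file): a caller
 * such as the management interface can start LE discovery through
 * hci_le_scan(). The example_ name and the timing values are hypothetical;
 * the timeout is forwarded (in jiffies) to the le_scan_disable delayed work,
 * so the scan is torn down automatically by le_scan_disable_work().
 */
static int example_start_le_discovery(struct hci_dev *hdev)
{
	/* 0x01 selects active scanning; interval/window of 0x0010 are raw
	 * controller units (0.625 ms each), chosen only for illustration.
	 */
	return hci_le_scan(hdev, 0x01, 0x0010, 0x0010,
			   msecs_to_jiffies(10000));
}
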
2117 /* Alloc HCI device */
2118 struct hci_dev *hci_alloc_dev(void)
2119 {
2120 	struct hci_dev *hdev;
2121 
2122 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2123 	if (!hdev)
2124 		return NULL;
2125 
2126 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2127 	hdev->esco_type = (ESCO_HV1);
2128 	hdev->link_mode = (HCI_LM_ACCEPT);
2129 	hdev->io_capability = 0x03; /* No Input No Output */
2130 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2131 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2132 
2133 	hdev->sniff_max_interval = 800;
2134 	hdev->sniff_min_interval = 80;
2135 
2136 	mutex_init(&hdev->lock);
2137 	mutex_init(&hdev->req_lock);
2138 
2139 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2140 	INIT_LIST_HEAD(&hdev->blacklist);
2141 	INIT_LIST_HEAD(&hdev->uuids);
2142 	INIT_LIST_HEAD(&hdev->link_keys);
2143 	INIT_LIST_HEAD(&hdev->long_term_keys);
2144 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2145 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2146 
2147 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2148 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2149 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2150 	INIT_WORK(&hdev->power_on, hci_power_on);
2151 	INIT_WORK(&hdev->le_scan, le_scan_work);
2152 
2153 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2154 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2155 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2156 
2157 	skb_queue_head_init(&hdev->rx_q);
2158 	skb_queue_head_init(&hdev->cmd_q);
2159 	skb_queue_head_init(&hdev->raw_q);
2160 
2161 	init_waitqueue_head(&hdev->req_wait_q);
2162 
2163 	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2164 
2165 	hci_init_sysfs(hdev);
2166 	discovery_init(hdev);
2167 
2168 	return hdev;
2169 }
2170 EXPORT_SYMBOL(hci_alloc_dev);
2171 
2172 /* Free HCI device */
2173 void hci_free_dev(struct hci_dev *hdev)
2174 {
2175 	/* will free via device release */
2176 	put_device(&hdev->dev);
2177 }
2178 EXPORT_SYMBOL(hci_free_dev);
2179 
2180 /* Register HCI device */
2181 int hci_register_dev(struct hci_dev *hdev)
2182 {
2183 	int id, error;
2184 
2185 	if (!hdev->open || !hdev->close)
2186 		return -EINVAL;
2187 
2188 	/* Do not allow HCI_AMP devices to register at index 0,
2189 	 * so the index can be used as the AMP controller ID.
2190 	 */
2191 	switch (hdev->dev_type) {
2192 	case HCI_BREDR:
2193 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2194 		break;
2195 	case HCI_AMP:
2196 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2197 		break;
2198 	default:
2199 		return -EINVAL;
2200 	}
2201 
2202 	if (id < 0)
2203 		return id;
2204 
2205 	sprintf(hdev->name, "hci%d", id);
2206 	hdev->id = id;
2207 
2208 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2209 
2210 	write_lock(&hci_dev_list_lock);
2211 	list_add(&hdev->list, &hci_dev_list);
2212 	write_unlock(&hci_dev_list_lock);
2213 
2214 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2215 					  WQ_MEM_RECLAIM, 1);
2216 	if (!hdev->workqueue) {
2217 		error = -ENOMEM;
2218 		goto err;
2219 	}
2220 
2221 	hdev->req_workqueue = alloc_workqueue(hdev->name,
2222 					      WQ_HIGHPRI | WQ_UNBOUND |
2223 					      WQ_MEM_RECLAIM, 1);
2224 	if (!hdev->req_workqueue) {
2225 		destroy_workqueue(hdev->workqueue);
2226 		error = -ENOMEM;
2227 		goto err;
2228 	}
2229 
2230 	error = hci_add_sysfs(hdev);
2231 	if (error < 0)
2232 		goto err_wqueue;
2233 
2234 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2235 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2236 				    hdev);
2237 	if (hdev->rfkill) {
2238 		if (rfkill_register(hdev->rfkill) < 0) {
2239 			rfkill_destroy(hdev->rfkill);
2240 			hdev->rfkill = NULL;
2241 		}
2242 	}
2243 
2244 	set_bit(HCI_SETUP, &hdev->dev_flags);
2245 
2246 	if (hdev->dev_type != HCI_AMP)
2247 		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2248 
2249 	hci_notify(hdev, HCI_DEV_REG);
2250 	hci_dev_hold(hdev);
2251 
2252 	queue_work(hdev->req_workqueue, &hdev->power_on);
2253 
2254 	return id;
2255 
2256 err_wqueue:
2257 	destroy_workqueue(hdev->workqueue);
2258 	destroy_workqueue(hdev->req_workqueue);
2259 err:
2260 	ida_simple_remove(&hci_index_ida, hdev->id);
2261 	write_lock(&hci_dev_list_lock);
2262 	list_del(&hdev->list);
2263 	write_unlock(&hci_dev_list_lock);
2264 
2265 	return error;
2266 }
2267 EXPORT_SYMBOL(hci_register_dev);
2268 
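/* Example (illustrative sketch, not part of the original file): the minimal
 * wiring a transport driver needs before hci_register_dev() will accept the
 * device. The example_* callbacks are hypothetical stand-ins for a real
 * driver's handlers; ->open and ->close must be set or registration fails
 * with -EINVAL (see the check above).
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct sk_buff *skb)
{
	/* A real driver would hand the frame to its hardware here. */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
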
2269 /* Unregister HCI device */
2270 void hci_unregister_dev(struct hci_dev *hdev)
2271 {
2272 	int i, id;
2273 
2274 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2275 
2276 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2277 
2278 	id = hdev->id;
2279 
2280 	write_lock(&hci_dev_list_lock);
2281 	list_del(&hdev->list);
2282 	write_unlock(&hci_dev_list_lock);
2283 
2284 	hci_dev_do_close(hdev);
2285 
2286 	for (i = 0; i < NUM_REASSEMBLY; i++)
2287 		kfree_skb(hdev->reassembly[i]);
2288 
2289 	cancel_work_sync(&hdev->power_on);
2290 
2291 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2292 	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2293 		hci_dev_lock(hdev);
2294 		mgmt_index_removed(hdev);
2295 		hci_dev_unlock(hdev);
2296 	}
2297 
2298 	/* mgmt_index_removed should take care of emptying the
2299 	 * pending list */
2300 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2301 
2302 	hci_notify(hdev, HCI_DEV_UNREG);
2303 
2304 	if (hdev->rfkill) {
2305 		rfkill_unregister(hdev->rfkill);
2306 		rfkill_destroy(hdev->rfkill);
2307 	}
2308 
2309 	hci_del_sysfs(hdev);
2310 
2311 	destroy_workqueue(hdev->workqueue);
2312 	destroy_workqueue(hdev->req_workqueue);
2313 
2314 	hci_dev_lock(hdev);
2315 	hci_blacklist_clear(hdev);
2316 	hci_uuids_clear(hdev);
2317 	hci_link_keys_clear(hdev);
2318 	hci_smp_ltks_clear(hdev);
2319 	hci_remote_oob_data_clear(hdev);
2320 	hci_dev_unlock(hdev);
2321 
2322 	hci_dev_put(hdev);
2323 
2324 	ida_simple_remove(&hci_index_ida, id);
2325 }
2326 EXPORT_SYMBOL(hci_unregister_dev);
2327 
2328 /* Suspend HCI device */
2329 int hci_suspend_dev(struct hci_dev *hdev)
2330 {
2331 	hci_notify(hdev, HCI_DEV_SUSPEND);
2332 	return 0;
2333 }
2334 EXPORT_SYMBOL(hci_suspend_dev);
2335 
2336 /* Resume HCI device */
2337 int hci_resume_dev(struct hci_dev *hdev)
2338 {
2339 	hci_notify(hdev, HCI_DEV_RESUME);
2340 	return 0;
2341 }
2342 EXPORT_SYMBOL(hci_resume_dev);
2343 
2344 /* Receive frame from HCI drivers */
2345 int hci_recv_frame(struct sk_buff *skb)
2346 {
2347 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2348 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2349 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2350 		kfree_skb(skb);
2351 		return -ENXIO;
2352 	}
2353 
2354 	/* Incoming skb */
2355 	bt_cb(skb)->incoming = 1;
2356 
2357 	/* Time stamp */
2358 	__net_timestamp(skb);
2359 
2360 	skb_queue_tail(&hdev->rx_q, skb);
2361 	queue_work(hdev->workqueue, &hdev->rx_work);
2362 
2363 	return 0;
2364 }
2365 EXPORT_SYMBOL(hci_recv_frame);
2366 
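/* Example (illustrative sketch, not part of the original file): how a driver
 * that already has a complete HCI packet hands it to the core. The core
 * finds the device through skb->dev and dispatches on the packet type set in
 * the control buffer, exactly as hci_recv_frame() and hci_rx_work() expect.
 * The example_ name and the data/len arguments are hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}
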
2367 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2368 			  int count, __u8 index)
2369 {
2370 	int len = 0;
2371 	int hlen = 0;
2372 	int remain = count;
2373 	struct sk_buff *skb;
2374 	struct bt_skb_cb *scb;
2375 
2376 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2377 	    index >= NUM_REASSEMBLY)
2378 		return -EILSEQ;
2379 
2380 	skb = hdev->reassembly[index];
2381 
2382 	if (!skb) {
2383 		switch (type) {
2384 		case HCI_ACLDATA_PKT:
2385 			len = HCI_MAX_FRAME_SIZE;
2386 			hlen = HCI_ACL_HDR_SIZE;
2387 			break;
2388 		case HCI_EVENT_PKT:
2389 			len = HCI_MAX_EVENT_SIZE;
2390 			hlen = HCI_EVENT_HDR_SIZE;
2391 			break;
2392 		case HCI_SCODATA_PKT:
2393 			len = HCI_MAX_SCO_SIZE;
2394 			hlen = HCI_SCO_HDR_SIZE;
2395 			break;
2396 		}
2397 
2398 		skb = bt_skb_alloc(len, GFP_ATOMIC);
2399 		if (!skb)
2400 			return -ENOMEM;
2401 
2402 		scb = (void *) skb->cb;
2403 		scb->expect = hlen;
2404 		scb->pkt_type = type;
2405 
2406 		skb->dev = (void *) hdev;
2407 		hdev->reassembly[index] = skb;
2408 	}
2409 
2410 	while (count) {
2411 		scb = (void *) skb->cb;
2412 		len = min_t(uint, scb->expect, count);
2413 
2414 		memcpy(skb_put(skb, len), data, len);
2415 
2416 		count -= len;
2417 		data += len;
2418 		scb->expect -= len;
2419 		remain = count;
2420 
2421 		switch (type) {
2422 		case HCI_EVENT_PKT:
2423 			if (skb->len == HCI_EVENT_HDR_SIZE) {
2424 				struct hci_event_hdr *h = hci_event_hdr(skb);
2425 				scb->expect = h->plen;
2426 
2427 				if (skb_tailroom(skb) < scb->expect) {
2428 					kfree_skb(skb);
2429 					hdev->reassembly[index] = NULL;
2430 					return -ENOMEM;
2431 				}
2432 			}
2433 			break;
2434 
2435 		case HCI_ACLDATA_PKT:
2436 			if (skb->len  == HCI_ACL_HDR_SIZE) {
2437 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
2438 				scb->expect = __le16_to_cpu(h->dlen);
2439 
2440 				if (skb_tailroom(skb) < scb->expect) {
2441 					kfree_skb(skb);
2442 					hdev->reassembly[index] = NULL;
2443 					return -ENOMEM;
2444 				}
2445 			}
2446 			break;
2447 
2448 		case HCI_SCODATA_PKT:
2449 			if (skb->len == HCI_SCO_HDR_SIZE) {
2450 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
2451 				scb->expect = h->dlen;
2452 
2453 				if (skb_tailroom(skb) < scb->expect) {
2454 					kfree_skb(skb);
2455 					hdev->reassembly[index] = NULL;
2456 					return -ENOMEM;
2457 				}
2458 			}
2459 			break;
2460 		}
2461 
2462 		if (scb->expect == 0) {
2463 			/* Complete frame */
2464 
2465 			bt_cb(skb)->pkt_type = type;
2466 			hci_recv_frame(skb);
2467 
2468 			hdev->reassembly[index] = NULL;
2469 			return remain;
2470 		}
2471 	}
2472 
2473 	return remain;
2474 }
2475 
2476 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2477 {
2478 	int rem = 0;
2479 
2480 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2481 		return -EILSEQ;
2482 
2483 	while (count) {
2484 		rem = hci_reassembly(hdev, type, data, count, type - 1);
2485 		if (rem < 0)
2486 			return rem;
2487 
2488 		data += (count - rem);
2489 		count = rem;
2490 	}
2491 
2492 	return rem;
2493 }
2494 EXPORT_SYMBOL(hci_recv_fragment);
2495 
2496 #define STREAM_REASSEMBLY 0
2497 
2498 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2499 {
2500 	int type;
2501 	int rem = 0;
2502 
2503 	while (count) {
2504 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2505 
2506 		if (!skb) {
2507 			struct { char type; } *pkt;
2508 
2509 			/* Start of the frame */
2510 			pkt = data;
2511 			type = pkt->type;
2512 
2513 			data++;
2514 			count--;
2515 		} else
2516 			type = bt_cb(skb)->pkt_type;
2517 
2518 		rem = hci_reassembly(hdev, type, data, count,
2519 				     STREAM_REASSEMBLY);
2520 		if (rem < 0)
2521 			return rem;
2522 
2523 		data += (count - rem);
2524 		count = rem;
2525 	}
2526 
2527 	return rem;
2528 }
2529 EXPORT_SYMBOL(hci_recv_stream_fragment);
2530 
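/* Example (illustrative sketch, not part of the original file): a UART-style
 * driver receiving an H4 byte stream (packet type indicator followed by the
 * packet itself) can push the raw bytes straight into the stream
 * reassembler; complete frames are forwarded to hci_recv_frame() internally.
 * The example_ name is hypothetical; a negative return indicates a
 * reassembly error.
 */
static int example_uart_receive(struct hci_dev *hdev, void *buf, int len)
{
	int err;

	err = hci_recv_stream_fragment(hdev, buf, len);
	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);

	return err;
}
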
2531 /* ---- Interface to upper protocols ---- */
2532 
2533 int hci_register_cb(struct hci_cb *cb)
2534 {
2535 	BT_DBG("%p name %s", cb, cb->name);
2536 
2537 	write_lock(&hci_cb_list_lock);
2538 	list_add(&cb->list, &hci_cb_list);
2539 	write_unlock(&hci_cb_list_lock);
2540 
2541 	return 0;
2542 }
2543 EXPORT_SYMBOL(hci_register_cb);
2544 
2545 int hci_unregister_cb(struct hci_cb *cb)
2546 {
2547 	BT_DBG("%p name %s", cb, cb->name);
2548 
2549 	write_lock(&hci_cb_list_lock);
2550 	list_del(&cb->list);
2551 	write_unlock(&hci_cb_list_lock);
2552 
2553 	return 0;
2554 }
2555 EXPORT_SYMBOL(hci_unregister_cb);
2556 
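/* Example (illustrative sketch, not part of the original file): an upper
 * protocol hooks into HCI notifications by registering a struct hci_cb on
 * the global callback list. Only the name is filled in here; real users also
 * set whichever confirmation callbacks they need, and unregister the same
 * structure on teardown. All example_* names are hypothetical.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}
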
2557 static int hci_send_frame(struct sk_buff *skb)
2558 {
2559 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2560 
2561 	if (!hdev) {
2562 		kfree_skb(skb);
2563 		return -ENODEV;
2564 	}
2565 
2566 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2567 
2568 	/* Time stamp */
2569 	__net_timestamp(skb);
2570 
2571 	/* Send copy to monitor */
2572 	hci_send_to_monitor(hdev, skb);
2573 
2574 	if (atomic_read(&hdev->promisc)) {
2575 		/* Send copy to the sockets */
2576 		hci_send_to_sock(hdev, skb);
2577 	}
2578 
2579 	/* Get rid of skb owner, prior to sending to the driver. */
2580 	skb_orphan(skb);
2581 
2582 	return hdev->send(skb);
2583 }
2584 
2585 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2586 {
2587 	skb_queue_head_init(&req->cmd_q);
2588 	req->hdev = hdev;
2589 	req->err = 0;
2590 }
2591 
2592 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2593 {
2594 	struct hci_dev *hdev = req->hdev;
2595 	struct sk_buff *skb;
2596 	unsigned long flags;
2597 
2598 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2599 
2600 	/* If an error occurred during request building, remove all HCI
2601 	 * commands queued on the HCI request queue.
2602 	 */
2603 	if (req->err) {
2604 		skb_queue_purge(&req->cmd_q);
2605 		return req->err;
2606 	}
2607 
2608 	/* Do not allow empty requests */
2609 	if (skb_queue_empty(&req->cmd_q))
2610 		return -ENODATA;
2611 
2612 	skb = skb_peek_tail(&req->cmd_q);
2613 	bt_cb(skb)->req.complete = complete;
2614 
2615 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2616 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2617 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2618 
2619 	queue_work(hdev->workqueue, &hdev->cmd_work);
2620 
2621 	return 0;
2622 }
2623 
2624 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2625 				       u32 plen, const void *param)
2626 {
2627 	int len = HCI_COMMAND_HDR_SIZE + plen;
2628 	struct hci_command_hdr *hdr;
2629 	struct sk_buff *skb;
2630 
2631 	skb = bt_skb_alloc(len, GFP_ATOMIC);
2632 	if (!skb)
2633 		return NULL;
2634 
2635 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2636 	hdr->opcode = cpu_to_le16(opcode);
2637 	hdr->plen   = plen;
2638 
2639 	if (plen)
2640 		memcpy(skb_put(skb, plen), param, plen);
2641 
2642 	BT_DBG("skb len %d", skb->len);
2643 
2644 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2645 	skb->dev = (void *) hdev;
2646 
2647 	return skb;
2648 }
2649 
2650 /* Send HCI command */
2651 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2652 		 const void *param)
2653 {
2654 	struct sk_buff *skb;
2655 
2656 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2657 
2658 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
2659 	if (!skb) {
2660 		BT_ERR("%s no memory for command", hdev->name);
2661 		return -ENOMEM;
2662 	}
2663 
2664 	/* Stand-alone HCI commands must be flagged as
2665 	 * single-command requests.
2666 	 */
2667 	bt_cb(skb)->req.start = true;
2668 
2669 	skb_queue_tail(&hdev->cmd_q, skb);
2670 	queue_work(hdev->workqueue, &hdev->cmd_work);
2671 
2672 	return 0;
2673 }
2674 
2675 /* Queue a command to an asynchronous HCI request */
2676 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2677 		    const void *param, u8 event)
2678 {
2679 	struct hci_dev *hdev = req->hdev;
2680 	struct sk_buff *skb;
2681 
2682 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2683 
2684 	/* If an error occurred during request building, there is no point in
2685 	 * queueing the HCI command. We can simply return.
2686 	 */
2687 	if (req->err)
2688 		return;
2689 
2690 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
2691 	if (!skb) {
2692 		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2693 		       hdev->name, opcode);
2694 		req->err = -ENOMEM;
2695 		return;
2696 	}
2697 
2698 	if (skb_queue_empty(&req->cmd_q))
2699 		bt_cb(skb)->req.start = true;
2700 
2701 	bt_cb(skb)->req.event = event;
2702 
2703 	skb_queue_tail(&req->cmd_q, skb);
2704 }
2705 
2706 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2707 		 const void *param)
2708 {
2709 	hci_req_add_ev(req, opcode, plen, param, 0);
2710 }
2711 
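/* Example (illustrative sketch, not part of the original file): batching two
 * LE scan commands into one request so a single completion callback runs
 * after the last command completes. The example_* names are hypothetical;
 * the opcodes, parameter structures and helpers are the ones already used
 * elsewhere in this file.
 */
static void example_scan_setup_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_scan_setup(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = 0x01;	/* active scanning */
	param_cp.interval = cpu_to_le16(0x0010);
	param_cp.window = cpu_to_le16(0x0010);
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	/* Nothing is sent until the whole request is run */
	return hci_req_run(&req, example_scan_setup_complete);
}
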
2712 /* Get data from the previously sent command */
2713 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2714 {
2715 	struct hci_command_hdr *hdr;
2716 
2717 	if (!hdev->sent_cmd)
2718 		return NULL;
2719 
2720 	hdr = (void *) hdev->sent_cmd->data;
2721 
2722 	if (hdr->opcode != cpu_to_le16(opcode))
2723 		return NULL;
2724 
2725 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2726 
2727 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2728 }
2729 
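/* Example (illustrative sketch, not part of the original file): an event
 * handler can recover the parameters of the command that just completed by
 * asking for the matching opcode; a NULL return means a different command
 * was in flight. The example_ name is hypothetical.
 */
static void example_cc_le_set_scan_enable(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s enable %u status 0x%2.2x", hdev->name, cp->enable, status);
}
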
2730 /* Send ACL data */
2731 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2732 {
2733 	struct hci_acl_hdr *hdr;
2734 	int len = skb->len;
2735 
2736 	skb_push(skb, HCI_ACL_HDR_SIZE);
2737 	skb_reset_transport_header(skb);
2738 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2739 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2740 	hdr->dlen   = cpu_to_le16(len);
2741 }
2742 
2743 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2744 			  struct sk_buff *skb, __u16 flags)
2745 {
2746 	struct hci_conn *conn = chan->conn;
2747 	struct hci_dev *hdev = conn->hdev;
2748 	struct sk_buff *list;
2749 
2750 	skb->len = skb_headlen(skb);
2751 	skb->data_len = 0;
2752 
2753 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2754 
2755 	switch (hdev->dev_type) {
2756 	case HCI_BREDR:
2757 		hci_add_acl_hdr(skb, conn->handle, flags);
2758 		break;
2759 	case HCI_AMP:
2760 		hci_add_acl_hdr(skb, chan->handle, flags);
2761 		break;
2762 	default:
2763 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2764 		return;
2765 	}
2766 
2767 	list = skb_shinfo(skb)->frag_list;
2768 	if (!list) {
2769 		/* Non fragmented */
2770 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2771 
2772 		skb_queue_tail(queue, skb);
2773 	} else {
2774 		/* Fragmented */
2775 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2776 
2777 		skb_shinfo(skb)->frag_list = NULL;
2778 
2779 		/* Queue all fragments atomically */
2780 		spin_lock(&queue->lock);
2781 
2782 		__skb_queue_tail(queue, skb);
2783 
2784 		flags &= ~ACL_START;
2785 		flags |= ACL_CONT;
2786 		do {
2787 			skb = list; list = list->next;
2788 
2789 			skb->dev = (void *) hdev;
2790 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2791 			hci_add_acl_hdr(skb, conn->handle, flags);
2792 
2793 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2794 
2795 			__skb_queue_tail(queue, skb);
2796 		} while (list);
2797 
2798 		spin_unlock(&queue->lock);
2799 	}
2800 }
2801 
2802 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2803 {
2804 	struct hci_dev *hdev = chan->conn->hdev;
2805 
2806 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2807 
2808 	skb->dev = (void *) hdev;
2809 
2810 	hci_queue_acl(chan, &chan->data_q, skb, flags);
2811 
2812 	queue_work(hdev->workqueue, &hdev->tx_work);
2813 }
2814 
2815 /* Send SCO data */
2816 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2817 {
2818 	struct hci_dev *hdev = conn->hdev;
2819 	struct hci_sco_hdr hdr;
2820 
2821 	BT_DBG("%s len %d", hdev->name, skb->len);
2822 
2823 	hdr.handle = cpu_to_le16(conn->handle);
2824 	hdr.dlen   = skb->len;
2825 
2826 	skb_push(skb, HCI_SCO_HDR_SIZE);
2827 	skb_reset_transport_header(skb);
2828 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2829 
2830 	skb->dev = (void *) hdev;
2831 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2832 
2833 	skb_queue_tail(&conn->data_q, skb);
2834 	queue_work(hdev->workqueue, &hdev->tx_work);
2835 }
2836 
2837 /* ---- HCI TX task (outgoing data) ---- */
2838 
2839 /* HCI Connection scheduler */
2840 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2841 				     int *quote)
2842 {
2843 	struct hci_conn_hash *h = &hdev->conn_hash;
2844 	struct hci_conn *conn = NULL, *c;
2845 	unsigned int num = 0, min = ~0;
2846 
2847 	/* We don't have to lock device here. Connections are always
2848 	 * added and removed with TX task disabled. */
2849 
2850 	rcu_read_lock();
2851 
2852 	list_for_each_entry_rcu(c, &h->list, list) {
2853 		if (c->type != type || skb_queue_empty(&c->data_q))
2854 			continue;
2855 
2856 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2857 			continue;
2858 
2859 		num++;
2860 
2861 		if (c->sent < min) {
2862 			min  = c->sent;
2863 			conn = c;
2864 		}
2865 
2866 		if (hci_conn_num(hdev, type) == num)
2867 			break;
2868 	}
2869 
2870 	rcu_read_unlock();
2871 
2872 	if (conn) {
2873 		int cnt, q;
2874 
2875 		switch (conn->type) {
2876 		case ACL_LINK:
2877 			cnt = hdev->acl_cnt;
2878 			break;
2879 		case SCO_LINK:
2880 		case ESCO_LINK:
2881 			cnt = hdev->sco_cnt;
2882 			break;
2883 		case LE_LINK:
2884 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2885 			break;
2886 		default:
2887 			cnt = 0;
2888 			BT_ERR("Unknown link type");
2889 		}
2890 
2891 		q = cnt / num;
2892 		*quote = q ? q : 1;
2893 	} else
2894 		*quote = 0;
2895 
2896 	BT_DBG("conn %p quote %d", conn, *quote);
2897 	return conn;
2898 }
2899 
2900 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2901 {
2902 	struct hci_conn_hash *h = &hdev->conn_hash;
2903 	struct hci_conn *c;
2904 
2905 	BT_ERR("%s link tx timeout", hdev->name);
2906 
2907 	rcu_read_lock();
2908 
2909 	/* Kill stalled connections */
2910 	list_for_each_entry_rcu(c, &h->list, list) {
2911 		if (c->type == type && c->sent) {
2912 			BT_ERR("%s killing stalled connection %pMR",
2913 			       hdev->name, &c->dst);
2914 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2915 		}
2916 	}
2917 
2918 	rcu_read_unlock();
2919 }
2920 
2921 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2922 				      int *quote)
2923 {
2924 	struct hci_conn_hash *h = &hdev->conn_hash;
2925 	struct hci_chan *chan = NULL;
2926 	unsigned int num = 0, min = ~0, cur_prio = 0;
2927 	struct hci_conn *conn;
2928 	int cnt, q, conn_num = 0;
2929 
2930 	BT_DBG("%s", hdev->name);
2931 
2932 	rcu_read_lock();
2933 
2934 	list_for_each_entry_rcu(conn, &h->list, list) {
2935 		struct hci_chan *tmp;
2936 
2937 		if (conn->type != type)
2938 			continue;
2939 
2940 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2941 			continue;
2942 
2943 		conn_num++;
2944 
2945 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2946 			struct sk_buff *skb;
2947 
2948 			if (skb_queue_empty(&tmp->data_q))
2949 				continue;
2950 
2951 			skb = skb_peek(&tmp->data_q);
2952 			if (skb->priority < cur_prio)
2953 				continue;
2954 
2955 			if (skb->priority > cur_prio) {
2956 				num = 0;
2957 				min = ~0;
2958 				cur_prio = skb->priority;
2959 			}
2960 
2961 			num++;
2962 
2963 			if (conn->sent < min) {
2964 				min  = conn->sent;
2965 				chan = tmp;
2966 			}
2967 		}
2968 
2969 		if (hci_conn_num(hdev, type) == conn_num)
2970 			break;
2971 	}
2972 
2973 	rcu_read_unlock();
2974 
2975 	if (!chan)
2976 		return NULL;
2977 
2978 	switch (chan->conn->type) {
2979 	case ACL_LINK:
2980 		cnt = hdev->acl_cnt;
2981 		break;
2982 	case AMP_LINK:
2983 		cnt = hdev->block_cnt;
2984 		break;
2985 	case SCO_LINK:
2986 	case ESCO_LINK:
2987 		cnt = hdev->sco_cnt;
2988 		break;
2989 	case LE_LINK:
2990 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2991 		break;
2992 	default:
2993 		cnt = 0;
2994 		BT_ERR("Unknown link type");
2995 	}
2996 
2997 	q = cnt / num;
2998 	*quote = q ? q : 1;
2999 	BT_DBG("chan %p quote %d", chan, *quote);
3000 	return chan;
3001 }
3002 
3003 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3004 {
3005 	struct hci_conn_hash *h = &hdev->conn_hash;
3006 	struct hci_conn *conn;
3007 	int num = 0;
3008 
3009 	BT_DBG("%s", hdev->name);
3010 
3011 	rcu_read_lock();
3012 
3013 	list_for_each_entry_rcu(conn, &h->list, list) {
3014 		struct hci_chan *chan;
3015 
3016 		if (conn->type != type)
3017 			continue;
3018 
3019 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3020 			continue;
3021 
3022 		num++;
3023 
3024 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3025 			struct sk_buff *skb;
3026 
3027 			if (chan->sent) {
3028 				chan->sent = 0;
3029 				continue;
3030 			}
3031 
3032 			if (skb_queue_empty(&chan->data_q))
3033 				continue;
3034 
3035 			skb = skb_peek(&chan->data_q);
3036 			if (skb->priority >= HCI_PRIO_MAX - 1)
3037 				continue;
3038 
3039 			skb->priority = HCI_PRIO_MAX - 1;
3040 
3041 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3042 			       skb->priority);
3043 		}
3044 
3045 		if (hci_conn_num(hdev, type) == num)
3046 			break;
3047 	}
3048 
3049 	rcu_read_unlock();
3050 
3051 }
3052 
3053 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3054 {
3055 	/* Calculate count of blocks used by this packet */
3056 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3057 }
3058 
3059 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3060 {
3061 	if (!test_bit(HCI_RAW, &hdev->flags)) {
3062 		/* ACL tx timeout must be longer than maximum
3063 		 * link supervision timeout (40.9 seconds) */
3064 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3065 				       HCI_ACL_TX_TIMEOUT))
3066 			hci_link_tx_to(hdev, ACL_LINK);
3067 	}
3068 }
3069 
3070 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3071 {
3072 	unsigned int cnt = hdev->acl_cnt;
3073 	struct hci_chan *chan;
3074 	struct sk_buff *skb;
3075 	int quote;
3076 
3077 	__check_timeout(hdev, cnt);
3078 
3079 	while (hdev->acl_cnt &&
3080 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3081 		u32 priority = (skb_peek(&chan->data_q))->priority;
3082 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3083 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3084 			       skb->len, skb->priority);
3085 
3086 			/* Stop if priority has changed */
3087 			if (skb->priority < priority)
3088 				break;
3089 
3090 			skb = skb_dequeue(&chan->data_q);
3091 
3092 			hci_conn_enter_active_mode(chan->conn,
3093 						   bt_cb(skb)->force_active);
3094 
3095 			hci_send_frame(skb);
3096 			hdev->acl_last_tx = jiffies;
3097 
3098 			hdev->acl_cnt--;
3099 			chan->sent++;
3100 			chan->conn->sent++;
3101 		}
3102 	}
3103 
3104 	if (cnt != hdev->acl_cnt)
3105 		hci_prio_recalculate(hdev, ACL_LINK);
3106 }
3107 
3108 static void hci_sched_acl_blk(struct hci_dev *hdev)
3109 {
3110 	unsigned int cnt = hdev->block_cnt;
3111 	struct hci_chan *chan;
3112 	struct sk_buff *skb;
3113 	int quote;
3114 	u8 type;
3115 
3116 	__check_timeout(hdev, cnt);
3117 
3118 	BT_DBG("%s", hdev->name);
3119 
3120 	if (hdev->dev_type == HCI_AMP)
3121 		type = AMP_LINK;
3122 	else
3123 		type = ACL_LINK;
3124 
3125 	while (hdev->block_cnt > 0 &&
3126 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3127 		u32 priority = (skb_peek(&chan->data_q))->priority;
3128 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3129 			int blocks;
3130 
3131 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3132 			       skb->len, skb->priority);
3133 
3134 			/* Stop if priority has changed */
3135 			if (skb->priority < priority)
3136 				break;
3137 
3138 			skb = skb_dequeue(&chan->data_q);
3139 
3140 			blocks = __get_blocks(hdev, skb);
3141 			if (blocks > hdev->block_cnt)
3142 				return;
3143 
3144 			hci_conn_enter_active_mode(chan->conn,
3145 						   bt_cb(skb)->force_active);
3146 
3147 			hci_send_frame(skb);
3148 			hdev->acl_last_tx = jiffies;
3149 
3150 			hdev->block_cnt -= blocks;
3151 			quote -= blocks;
3152 
3153 			chan->sent += blocks;
3154 			chan->conn->sent += blocks;
3155 		}
3156 	}
3157 
3158 	if (cnt != hdev->block_cnt)
3159 		hci_prio_recalculate(hdev, type);
3160 }
3161 
3162 static void hci_sched_acl(struct hci_dev *hdev)
3163 {
3164 	BT_DBG("%s", hdev->name);
3165 
3166 	/* No ACL link over BR/EDR controller */
3167 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3168 		return;
3169 
3170 	/* No AMP link over AMP controller */
3171 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3172 		return;
3173 
3174 	switch (hdev->flow_ctl_mode) {
3175 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3176 		hci_sched_acl_pkt(hdev);
3177 		break;
3178 
3179 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3180 		hci_sched_acl_blk(hdev);
3181 		break;
3182 	}
3183 }
3184 
3185 /* Schedule SCO */
3186 static void hci_sched_sco(struct hci_dev *hdev)
3187 {
3188 	struct hci_conn *conn;
3189 	struct sk_buff *skb;
3190 	int quote;
3191 
3192 	BT_DBG("%s", hdev->name);
3193 
3194 	if (!hci_conn_num(hdev, SCO_LINK))
3195 		return;
3196 
3197 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3198 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3199 			BT_DBG("skb %p len %d", skb, skb->len);
3200 			hci_send_frame(skb);
3201 
3202 			conn->sent++;
3203 			if (conn->sent == ~0)
3204 				conn->sent = 0;
3205 		}
3206 	}
3207 }
3208 
3209 static void hci_sched_esco(struct hci_dev *hdev)
3210 {
3211 	struct hci_conn *conn;
3212 	struct sk_buff *skb;
3213 	int quote;
3214 
3215 	BT_DBG("%s", hdev->name);
3216 
3217 	if (!hci_conn_num(hdev, ESCO_LINK))
3218 		return;
3219 
3220 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3221 						     &quote))) {
3222 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3223 			BT_DBG("skb %p len %d", skb, skb->len);
3224 			hci_send_frame(skb);
3225 
3226 			conn->sent++;
3227 			if (conn->sent == ~0)
3228 				conn->sent = 0;
3229 		}
3230 	}
3231 }
3232 
3233 static void hci_sched_le(struct hci_dev *hdev)
3234 {
3235 	struct hci_chan *chan;
3236 	struct sk_buff *skb;
3237 	int quote, cnt, tmp;
3238 
3239 	BT_DBG("%s", hdev->name);
3240 
3241 	if (!hci_conn_num(hdev, LE_LINK))
3242 		return;
3243 
3244 	if (!test_bit(HCI_RAW, &hdev->flags)) {
3245 		/* LE tx timeout must be longer than maximum
3246 		 * link supervision timeout (40.9 seconds) */
3247 		if (!hdev->le_cnt && hdev->le_pkts &&
3248 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
3249 			hci_link_tx_to(hdev, LE_LINK);
3250 	}
3251 
3252 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3253 	tmp = cnt;
3254 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3255 		u32 priority = (skb_peek(&chan->data_q))->priority;
3256 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3257 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3258 			       skb->len, skb->priority);
3259 
3260 			/* Stop if priority has changed */
3261 			if (skb->priority < priority)
3262 				break;
3263 
3264 			skb = skb_dequeue(&chan->data_q);
3265 
3266 			hci_send_frame(skb);
3267 			hdev->le_last_tx = jiffies;
3268 
3269 			cnt--;
3270 			chan->sent++;
3271 			chan->conn->sent++;
3272 		}
3273 	}
3274 
3275 	if (hdev->le_pkts)
3276 		hdev->le_cnt = cnt;
3277 	else
3278 		hdev->acl_cnt = cnt;
3279 
3280 	if (cnt != tmp)
3281 		hci_prio_recalculate(hdev, LE_LINK);
3282 }
3283 
3284 static void hci_tx_work(struct work_struct *work)
3285 {
3286 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3287 	struct sk_buff *skb;
3288 
3289 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3290 	       hdev->sco_cnt, hdev->le_cnt);
3291 
3292 	/* Schedule queues and send stuff to HCI driver */
3293 
3294 	hci_sched_acl(hdev);
3295 
3296 	hci_sched_sco(hdev);
3297 
3298 	hci_sched_esco(hdev);
3299 
3300 	hci_sched_le(hdev);
3301 
3302 	/* Send next queued raw (unknown type) packet */
3303 	while ((skb = skb_dequeue(&hdev->raw_q)))
3304 		hci_send_frame(skb);
3305 }
3306 
3307 /* ----- HCI RX task (incoming data processing) ----- */
3308 
3309 /* ACL data packet */
3310 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3311 {
3312 	struct hci_acl_hdr *hdr = (void *) skb->data;
3313 	struct hci_conn *conn;
3314 	__u16 handle, flags;
3315 
3316 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3317 
3318 	handle = __le16_to_cpu(hdr->handle);
3319 	flags  = hci_flags(handle);
3320 	handle = hci_handle(handle);
3321 
3322 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3323 	       handle, flags);
3324 
3325 	hdev->stat.acl_rx++;
3326 
3327 	hci_dev_lock(hdev);
3328 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3329 	hci_dev_unlock(hdev);
3330 
3331 	if (conn) {
3332 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3333 
3334 		/* Send to upper protocol */
3335 		l2cap_recv_acldata(conn, skb, flags);
3336 		return;
3337 	} else {
3338 		BT_ERR("%s ACL packet for unknown connection handle %d",
3339 		       hdev->name, handle);
3340 	}
3341 
3342 	kfree_skb(skb);
3343 }
3344 
3345 /* SCO data packet */
3346 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3347 {
3348 	struct hci_sco_hdr *hdr = (void *) skb->data;
3349 	struct hci_conn *conn;
3350 	__u16 handle;
3351 
3352 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3353 
3354 	handle = __le16_to_cpu(hdr->handle);
3355 
3356 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3357 
3358 	hdev->stat.sco_rx++;
3359 
3360 	hci_dev_lock(hdev);
3361 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3362 	hci_dev_unlock(hdev);
3363 
3364 	if (conn) {
3365 		/* Send to upper protocol */
3366 		sco_recv_scodata(conn, skb);
3367 		return;
3368 	} else {
3369 		BT_ERR("%s SCO packet for unknown connection handle %d",
3370 		       hdev->name, handle);
3371 	}
3372 
3373 	kfree_skb(skb);
3374 }
3375 
3376 static bool hci_req_is_complete(struct hci_dev *hdev)
3377 {
3378 	struct sk_buff *skb;
3379 
3380 	skb = skb_peek(&hdev->cmd_q);
3381 	if (!skb)
3382 		return true;
3383 
3384 	return bt_cb(skb)->req.start;
3385 }
3386 
3387 static void hci_resend_last(struct hci_dev *hdev)
3388 {
3389 	struct hci_command_hdr *sent;
3390 	struct sk_buff *skb;
3391 	u16 opcode;
3392 
3393 	if (!hdev->sent_cmd)
3394 		return;
3395 
3396 	sent = (void *) hdev->sent_cmd->data;
3397 	opcode = __le16_to_cpu(sent->opcode);
3398 	if (opcode == HCI_OP_RESET)
3399 		return;
3400 
3401 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3402 	if (!skb)
3403 		return;
3404 
3405 	skb_queue_head(&hdev->cmd_q, skb);
3406 	queue_work(hdev->workqueue, &hdev->cmd_work);
3407 }
3408 
3409 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3410 {
3411 	hci_req_complete_t req_complete = NULL;
3412 	struct sk_buff *skb;
3413 	unsigned long flags;
3414 
3415 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3416 
3417 	/* If the completed command doesn't match the last one that was
3418 	 * sent we need to do special handling of it.
3419 	 */
3420 	if (!hci_sent_cmd_data(hdev, opcode)) {
3421 		/* Some CSR based controllers generate a spontaneous
3422 		 * reset complete event during init and any pending
3423 		 * command will never be completed. In such a case we
3424 		 * need to resend whatever was the last sent
3425 		 * command.
3426 		 */
3427 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3428 			hci_resend_last(hdev);
3429 
3430 		return;
3431 	}
3432 
3433 	/* If the command succeeded and there are still more commands in
3434 	 * this request the request is not yet complete.
3435 	 */
3436 	if (!status && !hci_req_is_complete(hdev))
3437 		return;
3438 
3439 	/* If this was the last command in a request the complete
3440 	 * callback would be found in hdev->sent_cmd instead of the
3441 	 * command queue (hdev->cmd_q).
3442 	 */
3443 	if (hdev->sent_cmd) {
3444 		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3445 		if (req_complete)
3446 			goto call_complete;
3447 	}
3448 
3449 	/* Remove all pending commands belonging to this request */
3450 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3451 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3452 		if (bt_cb(skb)->req.start) {
3453 			__skb_queue_head(&hdev->cmd_q, skb);
3454 			break;
3455 		}
3456 
3457 		req_complete = bt_cb(skb)->req.complete;
3458 		kfree_skb(skb);
3459 	}
3460 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3461 
3462 call_complete:
3463 	if (req_complete)
3464 		req_complete(hdev, status);
3465 }
3466 
3467 static void hci_rx_work(struct work_struct *work)
3468 {
3469 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3470 	struct sk_buff *skb;
3471 
3472 	BT_DBG("%s", hdev->name);
3473 
3474 	while ((skb = skb_dequeue(&hdev->rx_q))) {
3475 		/* Send copy to monitor */
3476 		hci_send_to_monitor(hdev, skb);
3477 
3478 		if (atomic_read(&hdev->promisc)) {
3479 			/* Send copy to the sockets */
3480 			hci_send_to_sock(hdev, skb);
3481 		}
3482 
3483 		if (test_bit(HCI_RAW, &hdev->flags)) {
3484 			kfree_skb(skb);
3485 			continue;
3486 		}
3487 
3488 		if (test_bit(HCI_INIT, &hdev->flags)) {
3489 			/* Don't process data packets in this state. */
3490 			switch (bt_cb(skb)->pkt_type) {
3491 			case HCI_ACLDATA_PKT:
3492 			case HCI_SCODATA_PKT:
3493 				kfree_skb(skb);
3494 				continue;
3495 			}
3496 		}
3497 
3498 		/* Process frame */
3499 		switch (bt_cb(skb)->pkt_type) {
3500 		case HCI_EVENT_PKT:
3501 			BT_DBG("%s Event packet", hdev->name);
3502 			hci_event_packet(hdev, skb);
3503 			break;
3504 
3505 		case HCI_ACLDATA_PKT:
3506 			BT_DBG("%s ACL data packet", hdev->name);
3507 			hci_acldata_packet(hdev, skb);
3508 			break;
3509 
3510 		case HCI_SCODATA_PKT:
3511 			BT_DBG("%s SCO data packet", hdev->name);
3512 			hci_scodata_packet(hdev, skb);
3513 			break;
3514 
3515 		default:
3516 			kfree_skb(skb);
3517 			break;
3518 		}
3519 	}
3520 }
3521 
3522 static void hci_cmd_work(struct work_struct *work)
3523 {
3524 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3525 	struct sk_buff *skb;
3526 
3527 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3528 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3529 
3530 	/* Send queued commands */
3531 	if (atomic_read(&hdev->cmd_cnt)) {
3532 		skb = skb_dequeue(&hdev->cmd_q);
3533 		if (!skb)
3534 			return;
3535 
3536 		kfree_skb(hdev->sent_cmd);
3537 
3538 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3539 		if (hdev->sent_cmd) {
3540 			atomic_dec(&hdev->cmd_cnt);
3541 			hci_send_frame(skb);
3542 			if (test_bit(HCI_RESET, &hdev->flags))
3543 				del_timer(&hdev->cmd_timer);
3544 			else
3545 				mod_timer(&hdev->cmd_timer,
3546 					  jiffies + HCI_CMD_TIMEOUT);
3547 		} else {
3548 			skb_queue_head(&hdev->cmd_q, skb);
3549 			queue_work(hdev->workqueue, &hdev->cmd_work);
3550 		}
3551 	}
3552 }
3553 
3554 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3555 {
3556 	/* General inquiry access code (GIAC) */
3557 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3558 	struct hci_cp_inquiry cp;
3559 
3560 	BT_DBG("%s", hdev->name);
3561 
3562 	if (test_bit(HCI_INQUIRY, &hdev->flags))
3563 		return -EINPROGRESS;
3564 
3565 	inquiry_cache_flush(hdev);
3566 
3567 	memset(&cp, 0, sizeof(cp));
3568 	memcpy(&cp.lap, lap, sizeof(cp.lap));
3569 	cp.length  = length;
3570 
3571 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3572 }
3573 
3574 int hci_cancel_inquiry(struct hci_dev *hdev)
3575 {
3576 	BT_DBG("%s", hdev->name);
3577 
3578 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
3579 		return -EALREADY;
3580 
3581 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3582 }
3583 
3584 u8 bdaddr_to_le(u8 bdaddr_type)
3585 {
3586 	switch (bdaddr_type) {
3587 	case BDADDR_LE_PUBLIC:
3588 		return ADDR_LE_DEV_PUBLIC;
3589 
3590 	default:
3591 		/* Fallback to LE Random address type */
3592 		return ADDR_LE_DEV_RANDOM;
3593 	}
3594 }
3595