• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37 
/* One (e)SCO parameter set used when negotiating a synchronous link.
 * Entries of the tables below are tried in order, one per connection
 * attempt (see hci_setup_sync()).
 */
struct sco_param {
	u16 pkt_type;		/* allowed packet types (ESCO_* / EDR_ESCO_MASK bits) */
	u16 max_latency;	/* max latency for the Setup Synchronous Connection cmd */
	u8  retrans_effort;	/* retransmission effort for the same command */
};
43 
/* eSCO fallback table for CVSD air mode; indexed by conn->attempt - 1
 * in hci_setup_sync(), so S3 is tried first and D0 last.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
51 
/* SCO fallback table for CVSD when the peer is not eSCO capable
 * (see hci_setup_sync()); also indexed by conn->attempt - 1.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
56 
/* eSCO fallback table for transparent air mode (mSBC / wide-band speech);
 * indexed by conn->attempt - 1 in hci_setup_sync(), T2 tried before T1.
 */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
61 
/* Clean up pending explicit-connect state for @conn.
 *
 * Looks up the connection parameters associated with the connection's
 * (identity) address and, if they were only created for an explicit
 * connect, removes them; otherwise re-files them on the appropriate
 * pending list and refreshes background scanning.
 *
 * This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params || !params->explicit_connect)
		return;

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_background_scan(hdev);
}
113 
/* Detach @conn from its device and release its remaining state:
 * remove it from the connection hash, notify the driver, tear down
 * sysfs/debugfs entries and drop the references taken when the
 * connection was created in hci_conn_add().
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Connection parameters whose removal was deferred until
	 * disconnection are deleted here.
	 */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			/* Give the driver a chance to disable its SCO path */
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Drop the hdev and conn references held since hci_conn_add() */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
146 
/* Work callback that completes cleanup of an LE connection that was
 * still in the scanning phase. Scheduled by hci_connect_le_scan_remove();
 * see the comment there for why this cannot run inline.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the connection is still in the hash; someone
	 * else may have deleted it while this work was pending.
	 */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);

	/* Drop the temporary references taken in hci_connect_le_scan_remove() */
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
175 
/* Schedule deferred cleanup of a scanning-state LE connection from a
 * context where hci_conn_del() cannot be called directly.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
197 
/* Issue HCI_OP_CREATE_CONN to start an outgoing ACL connection to
 * conn->dst, seeding the paging parameters from the inquiry cache
 * when a recent entry is available.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		/* Only reuse cached page-scan data while it is fresh enough */
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* Bit 15 (0x8000) marks the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
238 
/* Start disconnecting @conn with the given HCI @reason.
 *
 * Returns the result of hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
260 
/* Issue the legacy HCI_OP_ADD_SCO command to set up a SCO link on top
 * of the ACL connection identified by @handle. Used when the controller
 * is not eSCO capable (see hci_sco_setup()).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
278 
/* Issue HCI_OP_SETUP_SYNC_CONN for an (e)SCO link on top of the ACL
 * connection identified by @handle.
 *
 * conn->attempt selects the parameter set from the fallback tables
 * above (attempt 1 uses entry 0, and so on), so each retry tries the
 * next, more conservative set.
 *
 * Returns false when all parameter sets have been exhausted, the air
 * mode is unsupported, or the command could not be sent; true otherwise.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* Fall back to plain SCO when the peer lacks eSCO support */
		if (lmp_esco_capable(conn->link)) {
			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
328 
/* Send HCI_OP_LE_CONN_UPDATE with the given connection interval range,
 * latency and supervision timeout, and mirror the values into any stored
 * connection parameters for this peer.
 *
 * Returns 0x01 if stored parameters existed and were updated, 0x00
 * otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
364 
/* Send HCI_OP_LE_START_ENC to begin LE link-layer encryption on @conn
 * with the given EDIV, Rand and Long Term Key. Only the first @key_size
 * bytes of @ltk are copied; the rest of the command LTK field stays
 * zeroed from the memset.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
382 
/* Continue SCO setup once the underlying ACL connection attempt has
 * completed with @status: kick off the synchronous link on success, or
 * report the failure and delete the pending SCO connection.
 *
 * Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* Prefer eSCO setup when the controller supports it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
403 
/* Delayed work: disconnect timeout. Tears down the connection when no
 * one holds a reference to it anymore; LE connections still in the
 * scanning phase get the deferred cleanup path instead.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
433 
/* Enter sniff mode.
 *
 * Delayed work fired after the connection has been idle: if both sides
 * support sniff mode and the link policy permits it, optionally program
 * sniff subrating and then request sniff mode from the controller.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Avoid sending a second request while a mode change is pending */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
468 
hci_conn_auto_accept(struct work_struct * work)469 static void hci_conn_auto_accept(struct work_struct *work)
470 {
471 	struct hci_conn *conn = container_of(work, struct hci_conn,
472 					     auto_accept_work.work);
473 
474 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
475 		     &conn->dst);
476 }
477 
le_disable_advertising(struct hci_dev * hdev)478 static void le_disable_advertising(struct hci_dev *hdev)
479 {
480 	if (ext_adv_capable(hdev)) {
481 		struct hci_cp_le_set_ext_adv_enable cp;
482 
483 		cp.enable = 0x00;
484 		cp.num_of_sets = 0x00;
485 
486 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
487 			     &cp);
488 	} else {
489 		u8 enable = 0x00;
490 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
491 			     &enable);
492 	}
493 }
494 
/* Delayed work: an outgoing LE connection attempt (or directed
 * advertising as peripheral) has timed out; fail or abort it.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
519 
/* Allocate and initialize a new hci_conn of the given @type to @dst,
 * add it to the device's connection hash and register its sysfs entry.
 *
 * Takes a reference on @hdev that is released in hci_conn_cleanup().
 * Returns the new connection, or NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks remote auth and key type as not yet known */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-link-type packet type defaults */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
604 
/* Fully tear down @conn: cancel its pending work, unlink it from any
 * associated SCO/ACL connection, return its unacked packet credits to
 * the device, and release all remaining state via hci_conn_cleanup().
 *
 * Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Detach any SCO link riding on this ACL connection */
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE traffic may share the ACL buffer pool when the
		 * controller has no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* (e)SCO link: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
651 
/* Pick the local HCI device to use for a connection from @src to @dst.
 *
 * When @src is set (non-BDADDR_ANY) the device whose own address of
 * @src_type matches it is chosen; otherwise the first usable device
 * whose address differs from @dst is chosen.
 *
 * Returns the device with a reference held (caller must hci_dev_put()),
 * or NULL if no suitable device is found.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip devices that are down, claimed by a user channel,
		 * or not primary controllers.
		 */
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
712 
/* Handle a failed LE connection attempt to conn->dst with HCI @status:
 * release any conn reference held by stored connection parameters,
 * notify userspace/protocols, delete the connection and restore
 * background scanning and advertising.
 *
 * This function requires the caller holds hdev->lock */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
	    (params && params->explicit_connect))
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	hci_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_req_reenable_advertising(hdev);
}
754 
/* Completion callback for the LE create-connection request.
 *
 * On success, clean up any pending explicit-connect scan state for the
 * connection; on failure, log the status and fail the connection.
 */
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);

	conn = hci_lookup_le_connect(hdev);

	if (!status) {
		/* The lookup can come back empty if the connection was
		 * already torn down concurrently; hci_connect_le_scan_cleanup()
		 * dereferences conn, so guard against NULL here as well.
		 */
		if (conn)
			hci_connect_le_scan_cleanup(conn);
		goto done;
	}

	bt_dev_err(hdev, "request failed to create LE connection: "
		   "status 0x%2.2x", status);

	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}
779 
conn_use_rpa(struct hci_conn * conn)780 static bool conn_use_rpa(struct hci_conn *conn)
781 {
782 	struct hci_dev *hdev = conn->hdev;
783 
784 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
785 }
786 
/* Fill one per-PHY parameter entry of the LE Extended Create Connection
 * command from the device's connect-scan settings and the connection's
 * negotiated LE parameters.
 */
static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	/* Leave connection event length unconstrained */
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}
803 
/* Queue the HCI commands that initiate an LE connection to conn->dst.
 *
 * Selects the local (own) address — using @direct_rpa verbatim when
 * provided — and then appends either an LE Extended Create Connection
 * command (one parameter entry per enabled scanning PHY) or the legacy
 * LE Create Connection command, depending on controller support.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn,
				       bdaddr_t *direct_rpa)
{
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	/* If direct address was provided we use it instead of current
	 * address.
	 */
	if (direct_rpa) {
		/* Only program the random address if it differs from the
		 * one the controller currently has.
		 */
		if (bacmp(&req->hdev->random_addr, direct_rpa))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
								direct_rpa);

		/* direct address is always RPA */
		own_addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Update random address, but set require_privacy to false so
		 * that we never connect with an non-resolvable address.
		 */
		if (hci_update_random_address(req, false, conn_use_rpa(conn),
					      &own_addr_type))
			return;
	}

	if (use_ext_conn(hdev)) {
		struct hci_cp_le_ext_create_conn *cp;
		struct hci_cp_le_ext_conn_param *p;
		/* Room for the header plus one parameter entry per PHY
		 * (1M, 2M and Coded).
		 */
		u8 data[sizeof(*cp) + sizeof(*p) * 3];
		u32 plen;

		cp = (void *) data;
		p = (void *) cp->data;

		memset(cp, 0, sizeof(*cp));

		bacpy(&cp->peer_addr, &conn->dst);
		cp->peer_addr_type = conn->dst_type;
		cp->own_addr_type = own_addr_type;

		plen = sizeof(*cp);

		if (scan_1m(hdev)) {
			cp->phys |= LE_SCAN_PHY_1M;
			set_ext_conn_params(conn, p);

			p++;
			plen += sizeof(*p);
		}

		if (scan_2m(hdev)) {
			cp->phys |= LE_SCAN_PHY_2M;
			set_ext_conn_params(conn, p);

			p++;
			plen += sizeof(*p);
		}

		if (scan_coded(hdev)) {
			cp->phys |= LE_SCAN_PHY_CODED;
			set_ext_conn_params(conn, p);

			plen += sizeof(*p);
		}

		hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);

	} else {
		struct hci_cp_le_create_conn cp;

		memset(&cp, 0, sizeof(cp));

		cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
		cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

		bacpy(&cp.peer_addr, &conn->dst);
		cp.peer_addr_type = conn->dst_type;
		cp.own_address_type = own_addr_type;
		cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
		cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
		cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
		cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
		cp.min_ce_len = cpu_to_le16(0x0000);
		cp.max_ce_len = cpu_to_le16(0x0000);

		hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
	}

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
896 
hci_req_directed_advertising(struct hci_request * req,struct hci_conn * conn)897 static void hci_req_directed_advertising(struct hci_request *req,
898 					 struct hci_conn *conn)
899 {
900 	struct hci_dev *hdev = req->hdev;
901 	u8 own_addr_type;
902 	u8 enable;
903 
904 	if (ext_adv_capable(hdev)) {
905 		struct hci_cp_le_set_ext_adv_params cp;
906 		bdaddr_t random_addr;
907 
908 		/* Set require_privacy to false so that the remote device has a
909 		 * chance of identifying us.
910 		 */
911 		if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
912 					   &own_addr_type, &random_addr) < 0)
913 			return;
914 
915 		memset(&cp, 0, sizeof(cp));
916 
917 		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
918 		cp.own_addr_type = own_addr_type;
919 		cp.channel_map = hdev->le_adv_channel_map;
920 		cp.tx_power = HCI_TX_POWER_INVALID;
921 		cp.primary_phy = HCI_ADV_PHY_1M;
922 		cp.secondary_phy = HCI_ADV_PHY_1M;
923 		cp.handle = 0; /* Use instance 0 for directed adv */
924 		cp.own_addr_type = own_addr_type;
925 		cp.peer_addr_type = conn->dst_type;
926 		bacpy(&cp.peer_addr, &conn->dst);
927 
928 		/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
929 		 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
930 		 * does not supports advertising data when the advertising set already
931 		 * contains some, the controller shall return erroc code 'Invalid
932 		 * HCI Command Parameters(0x12).
933 		 * So it is required to remove adv set for handle 0x00. since we use
934 		 * instance 0 for directed adv.
935 		 */
936 		__hci_req_remove_ext_adv_instance(req, cp.handle);
937 
938 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
939 
940 		if (own_addr_type == ADDR_LE_DEV_RANDOM &&
941 		    bacmp(&random_addr, BDADDR_ANY) &&
942 		    bacmp(&random_addr, &hdev->random_addr)) {
943 			struct hci_cp_le_set_adv_set_rand_addr cp;
944 
945 			memset(&cp, 0, sizeof(cp));
946 
947 			cp.handle = 0;
948 			bacpy(&cp.bdaddr, &random_addr);
949 
950 			hci_req_add(req,
951 				    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
952 				    sizeof(cp), &cp);
953 		}
954 
955 		__hci_req_enable_ext_advertising(req, 0x00);
956 	} else {
957 		struct hci_cp_le_set_adv_param cp;
958 
959 		/* Clear the HCI_LE_ADV bit temporarily so that the
960 		 * hci_update_random_address knows that it's safe to go ahead
961 		 * and write a new random address. The flag will be set back on
962 		 * as soon as the SET_ADV_ENABLE HCI command completes.
963 		 */
964 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
965 
966 		/* Set require_privacy to false so that the remote device has a
967 		 * chance of identifying us.
968 		 */
969 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
970 					      &own_addr_type) < 0)
971 			return;
972 
973 		memset(&cp, 0, sizeof(cp));
974 
975 		/* Some controllers might reject command if intervals are not
976 		 * within range for undirected advertising.
977 		 * BCM20702A0 is known to be affected by this.
978 		 */
979 		cp.min_interval = cpu_to_le16(0x0020);
980 		cp.max_interval = cpu_to_le16(0x0020);
981 
982 		cp.type = LE_ADV_DIRECT_IND;
983 		cp.own_address_type = own_addr_type;
984 		cp.direct_addr_type = conn->dst_type;
985 		bacpy(&cp.direct_addr, &conn->dst);
986 		cp.channel_map = hdev->le_adv_channel_map;
987 
988 		hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
989 
990 		enable = 0x01;
991 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
992 			    &enable);
993 	}
994 
995 	conn->state = BT_CONNECT;
996 }
997 
/* Initiate an LE connection to @dst/@dst_type, or start directed
 * advertising towards it when connecting in the peripheral role.
 *
 * Returns the connection object (a reference is held for newly created
 * ones) or an ERR_PTR:
 *   -EOPNOTSUPP   controller is not LE capable
 *   -ECONNREFUSED LE is supported but not enabled
 *   -EBUSY        another LE connect attempt is running, the link is
 *                 already established, or active scanning prevents
 *                 directed advertising
 *   -ENOMEM       no memory for a new connection object
 *
 * NOTE(review): like the other connect helpers in this file, callers
 * are presumably expected to hold hdev->lock - confirm at call sites.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role, bdaddr_t *direct_rpa)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* This ensures that during disable le_scan address resolution
	 * will not be disabled if it is followed by le_create_conn
	 */
	bool rpa_le_conn = true;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	if (conn) {
		/* Reuse the object found in scanning state; only the
		 * destination (possibly rewritten to the RPA) changes.
		 */
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For central role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for peripheral role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		 __hci_req_disable_advertising(&req);

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			hci_req_purge(&req);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	/* Use per-device connection parameters when present, otherwise
	 * fall back to the controller-wide defaults.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(&req, rpa_le_conn);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	hci_req_add_le_create_conn(&req, conn, direct_rpa);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1128 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)1129 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1130 {
1131 	struct hci_conn *conn;
1132 
1133 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1134 	if (!conn)
1135 		return false;
1136 
1137 	if (conn->state != BT_CONNECTED)
1138 		return false;
1139 
1140 	return true;
1141 }
1142 
/* This function requires the caller holds hdev->lock
 *
 * Prepare connection parameters for an explicit (user initiated) LE
 * connection attempt: create params for the address if none exist yet
 * and make sure they sit on the pend_le_conns list so the passive scan
 * machinery will connect when the device is seen.
 *
 * Returns 0 on success, -EISCONN when a link to the address is already
 * established, or -ENOMEM when new parameters could not be allocated.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1180 
/* This function requires the caller holds hdev->lock
 *
 * Initiate an LE connection to @dst via the background scan rather
 * than a direct create-connection command: the connection object is
 * created in BT_CONNECT/scanning state and the actual connect happens
 * once the device is observed.
 *
 * Returns the connection object (with a reference held) or an ERR_PTR:
 *   -EOPNOTSUPP   controller is not LE capable
 *   -ECONNREFUSED LE is supported but not enabled
 *   -ENOMEM       no memory for a new connection object
 *   -EBUSY        connection parameters could not be set up
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick the background scan so the controller starts looking
	 * for the device we want to connect to.
	 */
	hci_update_background_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1238 
/* Set up (or reuse) an ACL connection to @dst.
 *
 * Returns the connection object with a reference held, or an ERR_PTR:
 * -EOPNOTSUPP / -ECONNREFUSED when BR/EDR is unsupported or disabled,
 * -ENOMEM when no connection object could be allocated.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	/* BR/EDR must be enabled on the controller before connecting. */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return lmp_bredr_capable(hdev) ? ERR_PTR(-ECONNREFUSED) :
						 ERR_PTR(-EOPNOTSUPP);

	/* Reuse an existing ACL object for this peer, creating one only
	 * if none is present yet.
	 */
	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;

	/* Only start a new connection attempt when the link is neither
	 * being set up nor already established.
	 */
	switch (acl->state) {
	case BT_OPEN:
	case BT_CLOSED:
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
		break;
	default:
		break;
	}

	return acl;
}
1271 
/* Set up a SCO or eSCO (@type) link to @dst, creating or reusing the
 * underlying ACL connection first.
 *
 * Returns the SCO connection object (a reference is held), an ERR_PTR
 * propagated from hci_connect_acl(), or ERR_PTR(-ENOMEM). When a mode
 * change on the ACL is still pending, the actual SCO setup is deferred
 * until it completes (HCI_CONN_SCO_SETUP_PEND).
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and SCO objects. */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* Force the ACL out of sniff mode before SCO setup. */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1315 
/* Check link security requirement
 *
 * Returns 1 when the link satisfies the security requirements implied
 * by its security level and the controller configuration, 0 otherwise.
 */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	 /* AES encryption is required for Level 4:
	  *
	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	  * page 1319:
	  *
	  * 128-bit equivalent strength for link and encryption keys
	  * required using FIPS approved algorithms (E0 not allowed,
	  * SAFER+ not allowed, and P-192 not allowed; encryption key
	  * not shortened)
	  */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	/* With SSP enabled the link must additionally be encrypted. */
	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}
1355 
/* Authenticate remote device
 *
 * Returns 1 when the link is already authenticated at a sufficient
 * level. Otherwise raises pending_sec_level as needed, issues an HCI
 * authentication request unless one is already pending, and returns 0
 * (completion is reported asynchronously via the auth complete event).
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a security level that is already being requested. */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
1392 
1393 /* Encrypt the link */
hci_conn_encrypt(struct hci_conn * conn)1394 static void hci_conn_encrypt(struct hci_conn *conn)
1395 {
1396 	BT_DBG("hcon %p", conn);
1397 
1398 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1399 		struct hci_cp_set_conn_encrypt cp;
1400 		cp.handle  = cpu_to_le16(conn->handle);
1401 		cp.encrypt = 0x01;
1402 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1403 			     &cp);
1404 	}
1405 }
1406 
/* Enable security
 *
 * Ensure the link meets @sec_level, starting authentication and/or
 * encryption as necessary (LE links are handed off to SMP).
 *
 * Returns 1 when the requirements are already satisfied, 0 when the
 * security procedure was started or must wait; callers are notified of
 * completion through the security/encrypt change events.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE pairing and encryption are handled entirely by SMP. */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1484 
1485 /* Check secure link requirement */
hci_conn_check_secure(struct hci_conn * conn,__u8 sec_level)1486 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1487 {
1488 	BT_DBG("hcon %p", conn);
1489 
1490 	/* Accept if non-secure or higher security level is required */
1491 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1492 		return 1;
1493 
1494 	/* Accept if secure or higher security level is already present */
1495 	if (conn->sec_level == BT_SECURITY_HIGH ||
1496 	    conn->sec_level == BT_SECURITY_FIPS)
1497 		return 1;
1498 
1499 	/* Reject not secure link */
1500 	return 0;
1501 }
1502 EXPORT_SYMBOL(hci_conn_check_secure);
1503 
1504 /* Switch role */
hci_conn_switch_role(struct hci_conn * conn,__u8 role)1505 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1506 {
1507 	BT_DBG("hcon %p", conn);
1508 
1509 	if (role == conn->role)
1510 		return 1;
1511 
1512 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1513 		struct hci_cp_switch_role cp;
1514 		bacpy(&cp.bdaddr, &conn->dst);
1515 		cp.role = role;
1516 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1517 	}
1518 
1519 	return 0;
1520 }
1521 EXPORT_SYMBOL(hci_conn_switch_role);
1522 
1523 /* Enter active mode */
hci_conn_enter_active_mode(struct hci_conn * conn,__u8 force_active)1524 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1525 {
1526 	struct hci_dev *hdev = conn->hdev;
1527 
1528 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1529 
1530 	if (conn->mode != HCI_CM_SNIFF)
1531 		goto timer;
1532 
1533 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1534 		goto timer;
1535 
1536 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1537 		struct hci_cp_exit_sniff_mode cp;
1538 		cp.handle = cpu_to_le16(conn->handle);
1539 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1540 	}
1541 
1542 timer:
1543 	if (hdev->idle_timeout > 0)
1544 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1545 				   msecs_to_jiffies(hdev->idle_timeout));
1546 }
1547 
/* Drop all connection on the device
 *
 * Mark every connection closed, notify upper layers with a "local host
 * terminated" reason and delete the connection objects.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* hci_conn_del() unlinks the entry, hence the _safe iterator. */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1563 
/* Check pending connect attempts
 *
 * If an ACL connection is parked in BT_CONNECT2 (waiting behind a
 * previous attempt), issue its create-connection command now.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
1579 
get_link_mode(struct hci_conn * conn)1580 static u32 get_link_mode(struct hci_conn *conn)
1581 {
1582 	u32 link_mode = 0;
1583 
1584 	if (conn->role == HCI_ROLE_MASTER)
1585 		link_mode |= HCI_LM_MASTER;
1586 
1587 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1588 		link_mode |= HCI_LM_ENCRYPT;
1589 
1590 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1591 		link_mode |= HCI_LM_AUTH;
1592 
1593 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1594 		link_mode |= HCI_LM_SECURE;
1595 
1596 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1597 		link_mode |= HCI_LM_FIPS;
1598 
1599 	return link_mode;
1600 }
1601 
/* Handle the HCIGETCONNLIST ioctl: copy a snapshot of the device's
 * connection table to userspace.
 *
 * Returns 0 on success or a negative errno: -EFAULT on copy failures,
 * -EINVAL for an out-of-range count, -ENOMEM on allocation failure,
 * -ENODEV when the device id is unknown.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the requested count so the allocation below stays small
	 * and the size computation cannot overflow.
	 */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	/* Zero the buffer so no uninitialized kernel heap bytes (e.g.
	 * structure padding) can reach userspace via copy_to_user().
	 */
	cl = kzalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy out the header plus the entries actually filled in. */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
1654 
/* Handle the HCIGETCONNINFO ioctl: report details of the connection to
 * the address/type given in the request.
 *
 * Returns 0 on success, -EFAULT on copy errors or -ENOENT when no such
 * connection exists.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	/* The info structure is written just past the request header. */
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
1682 
/* Handle the HCIGETAUTHINFO ioctl: report the auth_type of the ACL
 * link to the requested address.
 *
 * Returns 0 on success, -EFAULT on copy errors or -ENOENT when no ACL
 * connection to the address exists.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;
	bool found = false;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn) {
		req.type = conn->auth_type;
		found = true;
	}
	hci_dev_unlock(hdev);

	if (!found)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
1702 
/* Allocate a new hci_chan, take a reference on @conn and link the
 * channel on the connection's channel list (RCU-protected).
 *
 * Returns NULL on allocation failure or when the connection is being
 * torn down (HCI_CONN_DROP is set by hci_chan_del()).
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
1727 
/* Unlink and free @chan.
 *
 * The RCU list removal is followed by synchronize_rcu() so concurrent
 * RCU readers (see hci_chan_lookup_handle()) can no longer observe the
 * channel before its memory is released.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken by hci_chan_create(). */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1747 
/* Delete every channel attached to @conn. */
void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	/* hci_chan_del() removes the entry, hence the _safe iterator. */
	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}
1757 
/* Find the channel with @handle on one connection, or NULL. */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *pos;

	list_for_each_entry(pos, &hcon->chan_list, list)
		if (pos->handle == handle)
			return pos;

	return NULL;
}
1770 
/* Find the channel with @handle across all connections of @hdev.
 * The connection list is walked under RCU; returns NULL if not found.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
1789 
/* Report the BT_PHY_* bits potentially in use by @conn, derived from
 * the link type, the negotiated packet types and (for LE) the TX/RX
 * PHYs.
 *
 * NOTE(review): the negated pkt_type tests below suggest the EDR bits
 * act as "packet type not allowed" flags (consistent with the
 * EDR_ESCO_MASK handling at the top of this file) - confirm against
 * the HCI packet-type definitions.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CSB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
1892