1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37 
38 struct sco_param {
39 	u16 pkt_type;
40 	u16 max_latency;
41 	u8  retrans_effort;
42 };
43 
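/* Parameter tables for synchronous connection setup, tried in order on
 * successive attempts (see hci_setup_sync() below). Per the HCI spec,
 * max_latency is in milliseconds and retrans_effort 0x01 means "at least
 * one retransmission, optimize for power", 0x02 "optimize for link
 * quality" and 0xff "don't care". The S1-S3/T1-T2 labels refer to the
 * HFP safe settings; D0/D1 are the legacy SCO fallbacks.
 */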
44 static const struct sco_param esco_param_cvsd[] = {
45 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
46 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
47 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
48 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
49 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
50 };
51 
52 static const struct sco_param sco_param_cvsd[] = {
53 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
54 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
55 };
56 
57 static const struct sco_param esco_param_msbc[] = {
58 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
59 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
60 };
61 
62 /* This function requires the caller holds hdev->lock */
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65 	struct hci_conn_params *params;
66 	struct hci_dev *hdev = conn->hdev;
67 	struct smp_irk *irk;
68 	bdaddr_t *bdaddr;
69 	u8 bdaddr_type;
70 
71 	bdaddr = &conn->dst;
72 	bdaddr_type = conn->dst_type;
73 
74 	/* Check if we need to convert to identity address */
75 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76 	if (irk) {
77 		bdaddr = &irk->bdaddr;
78 		bdaddr_type = irk->addr_type;
79 	}
80 
81 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82 					   bdaddr_type);
83 	if (!params || !params->explicit_connect)
84 		return;
85 
86 	/* The connection attempt was doing a scan for a new RPA and is
87 	 * still in the scan phase. If the params are not tied to any other
88 	 * autoconnect action, remove them completely. If they are, just
89 	 * unmark them as waiting for connection by clearing explicit_connect.
90 	 */
91 	params->explicit_connect = false;
92 
93 	list_del_init(&params->action);
94 
95 	switch (params->auto_connect) {
96 	case HCI_AUTO_CONN_EXPLICIT:
97 		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98 		/* return instead of break to avoid duplicate scan update */
99 		return;
100 	case HCI_AUTO_CONN_DIRECT:
101 	case HCI_AUTO_CONN_ALWAYS:
102 		list_add(&params->action, &hdev->pend_le_conns);
103 		break;
104 	case HCI_AUTO_CONN_REPORT:
105 		list_add(&params->action, &hdev->pend_le_reports);
106 		break;
107 	default:
108 		break;
109 	}
110 
111 	hci_update_background_scan(hdev);
112 }
113 
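/* Remove the connection from the connection hash and tear down its
 * remaining state (channels, debugfs, sysfs). Shared by hci_conn_del()
 * and the LE scan cleanup path, which needs only this part. Callers
 * hold hdev->lock.
 */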
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116 	struct hci_dev *hdev = conn->hdev;
117 
118 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120 
121 	hci_chan_list_flush(conn);
122 
123 	hci_conn_hash_del(hdev, conn);
124 
125 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
126 		switch (conn->setting & SCO_AIRMODE_MASK) {
127 		case SCO_AIRMODE_CVSD:
128 		case SCO_AIRMODE_TRANSP:
129 			if (hdev->notify)
130 				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
131 			break;
132 		}
133 	} else {
134 		if (hdev->notify)
135 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
136 	}
137 
138 	debugfs_remove_recursive(conn->debugfs);
139 
140 	hci_conn_del_sysfs(conn);
141 
142 	hci_dev_put(hdev);
143 }
144 
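/* Deferred cleanup for a connection attempt that was still in the
 * scanning phase. Runs from the system workqueue so that it can take
 * hci_dev_lock() without deadlocking against hci_conn_del() callers;
 * see hci_connect_le_scan_remove() below.
 */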
145 static void le_scan_cleanup(struct work_struct *work)
146 {
147 	struct hci_conn *conn = container_of(work, struct hci_conn,
148 					     le_scan_cleanup);
149 	struct hci_dev *hdev = conn->hdev;
150 	struct hci_conn *c = NULL;
151 
152 	BT_DBG("%s hcon %p", hdev->name, conn);
153 
154 	hci_dev_lock(hdev);
155 
156 	/* Check that the hci_conn is still around */
157 	rcu_read_lock();
158 	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
159 		if (c == conn)
160 			break;
161 	}
162 	rcu_read_unlock();
163 
164 	if (c == conn) {
165 		hci_connect_le_scan_cleanup(conn);
166 		hci_conn_cleanup(conn);
167 	}
168 
169 	hci_dev_unlock(hdev);
170 	hci_dev_put(hdev);
171 	hci_conn_put(conn);
172 }
173 
174 static void hci_connect_le_scan_remove(struct hci_conn *conn)
175 {
176 	BT_DBG("%s hcon %p", conn->hdev->name, conn);
177 
178 	/* We can't call hci_conn_del/hci_conn_cleanup here since that
179 	 * could deadlock with another hci_conn_del() call that's holding
180 	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
181 	 * Instead, grab temporary extra references to the hci_dev and
182 	 * hci_conn and perform the necessary cleanup in a separate work
183 	 * callback.
184 	 */
185 
186 	hci_dev_hold(conn->hdev);
187 	hci_conn_get(conn);
188 
189 	/* Even though we hold a reference to the hdev, many other
190 	 * things might get cleaned up meanwhile, including the hdev's
191 	 * own workqueue, so we can't use that for scheduling.
192 	 */
193 	schedule_work(&conn->le_scan_cleanup);
194 }
195 
196 static void hci_acl_create_connection(struct hci_conn *conn)
197 {
198 	struct hci_dev *hdev = conn->hdev;
199 	struct inquiry_entry *ie;
200 	struct hci_cp_create_conn cp;
201 
202 	BT_DBG("hcon %p", conn);
203 
204 	/* Many controllers disallow HCI Create Connection while they are
205 	 * doing HCI Inquiry, so cancel the Inquiry before issuing HCI Create
206 	 * Connection. This may cause the MGMT discovering state to become
207 	 * false without user space having requested it, but that is okay:
208 	 * the MGMT Discovery API does not promise that discovery runs
209 	 * forever. Instead, user space monitors the MGMT discovering status
210 	 * and may request discovery again when the flag becomes false.
211 	 */
212 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
213 		/* Put this connection to "pending" state so that it will be
214 		 * executed after the inquiry cancel command complete event.
215 		 */
216 		conn->state = BT_CONNECT2;
217 		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
218 		return;
219 	}
220 
221 	conn->state = BT_CONNECT;
222 	conn->out = true;
223 	conn->role = HCI_ROLE_MASTER;
224 
225 	conn->attempt++;
226 
227 	conn->link_policy = hdev->link_policy;
228 
229 	memset(&cp, 0, sizeof(cp));
230 	bacpy(&cp.bdaddr, &conn->dst);
231 	cp.pscan_rep_mode = 0x02;
232 
233 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
234 	if (ie) {
235 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
236 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
237 			cp.pscan_mode     = ie->data.pscan_mode;
238 			cp.clock_offset   = ie->data.clock_offset |
239 					    cpu_to_le16(0x8000);
240 		}
241 
242 		memcpy(conn->dev_class, ie->data.dev_class, 3);
243 	}
244 
245 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
246 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
247 		cp.role_switch = 0x01;
248 	else
249 		cp.role_switch = 0x00;
250 
251 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
252 }
253 
254 int hci_disconnect(struct hci_conn *conn, __u8 reason)
255 {
256 	BT_DBG("hcon %p", conn);
257 
258 	/* When we are the central of an established connection and it
259 	 * enters the disconnect timeout, go ahead and try to read the
260 	 * current clock offset. Processing of the result is done within
261 	 * the event handling in the hci_clock_offset_evt function.
262 	 */
263 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
264 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
265 		struct hci_dev *hdev = conn->hdev;
266 		struct hci_cp_read_clock_offset clkoff_cp;
267 
268 		clkoff_cp.handle = cpu_to_le16(conn->handle);
269 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
270 			     &clkoff_cp);
271 	}
272 
273 	return hci_abort_conn(conn, reason);
274 }
275 
276 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
277 {
278 	struct hci_dev *hdev = conn->hdev;
279 	struct hci_cp_add_sco cp;
280 
281 	BT_DBG("hcon %p", conn);
282 
283 	conn->state = BT_CONNECT;
284 	conn->out = true;
285 
286 	conn->attempt++;
287 
288 	cp.handle   = cpu_to_le16(handle);
289 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
290 
291 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
292 }
293 
294 static bool find_next_esco_param(struct hci_conn *conn,
295 				 const struct sco_param *esco_param, int size)
296 {
297 	for (; conn->attempt <= size; conn->attempt++) {
298 		if (lmp_esco_2m_capable(conn->link) ||
299 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
300 			break;
301 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
302 		       conn, conn->attempt);
303 	}
304 
305 	return conn->attempt <= size;
306 }
307 
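/* Issue HCI Setup Synchronous Connection for an eSCO/SCO link. The
 * air-mode specific parameters are selected by conn->attempt, so each
 * retry falls back to a more conservative entry of the tables above;
 * returns false once all parameter sets are exhausted.
 */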
308 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
309 {
310 	struct hci_dev *hdev = conn->hdev;
311 	struct hci_cp_setup_sync_conn cp;
312 	const struct sco_param *param;
313 
314 	BT_DBG("hcon %p", conn);
315 
316 	conn->state = BT_CONNECT;
317 	conn->out = true;
318 
319 	conn->attempt++;
320 
321 	cp.handle   = cpu_to_le16(handle);
322 
323 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
324 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
325 	cp.voice_setting  = cpu_to_le16(conn->setting);
326 
327 	switch (conn->setting & SCO_AIRMODE_MASK) {
328 	case SCO_AIRMODE_TRANSP:
329 		if (!find_next_esco_param(conn, esco_param_msbc,
330 					  ARRAY_SIZE(esco_param_msbc)))
331 			return false;
332 		param = &esco_param_msbc[conn->attempt - 1];
333 		break;
334 	case SCO_AIRMODE_CVSD:
335 		if (lmp_esco_capable(conn->link)) {
336 			if (!find_next_esco_param(conn, esco_param_cvsd,
337 						  ARRAY_SIZE(esco_param_cvsd)))
338 				return false;
339 			param = &esco_param_cvsd[conn->attempt - 1];
340 		} else {
341 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
342 				return false;
343 			param = &sco_param_cvsd[conn->attempt - 1];
344 		}
345 		break;
346 	default:
347 		return false;
348 	}
349 
350 	cp.retrans_effort = param->retrans_effort;
351 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
352 	cp.max_latency = __cpu_to_le16(param->max_latency);
353 
354 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
355 		return false;
356 
357 	return true;
358 }
359 
360 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
361 		      u16 to_multiplier)
362 {
363 	struct hci_dev *hdev = conn->hdev;
364 	struct hci_conn_params *params;
365 	struct hci_cp_le_conn_update cp;
366 
367 	hci_dev_lock(hdev);
368 
369 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
370 	if (params) {
371 		params->conn_min_interval = min;
372 		params->conn_max_interval = max;
373 		params->conn_latency = latency;
374 		params->supervision_timeout = to_multiplier;
375 	}
376 
377 	hci_dev_unlock(hdev);
378 
379 	memset(&cp, 0, sizeof(cp));
380 	cp.handle		= cpu_to_le16(conn->handle);
381 	cp.conn_interval_min	= cpu_to_le16(min);
382 	cp.conn_interval_max	= cpu_to_le16(max);
383 	cp.conn_latency		= cpu_to_le16(latency);
384 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
385 	cp.min_ce_len		= cpu_to_le16(0x0000);
386 	cp.max_ce_len		= cpu_to_le16(0x0000);
387 
388 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
389 
390 	if (params)
391 		return 0x01;
392 
393 	return 0x00;
394 }
395 
396 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
397 		      __u8 ltk[16], __u8 key_size)
398 {
399 	struct hci_dev *hdev = conn->hdev;
400 	struct hci_cp_le_start_enc cp;
401 
402 	BT_DBG("hcon %p", conn);
403 
404 	memset(&cp, 0, sizeof(cp));
405 
406 	cp.handle = cpu_to_le16(conn->handle);
407 	cp.rand = rand;
408 	cp.ediv = ediv;
409 	memcpy(cp.ltk, ltk, key_size);
410 
411 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
412 }
413 
414 /* Device _must_ be locked */
415 void hci_sco_setup(struct hci_conn *conn, __u8 status)
416 {
417 	struct hci_conn *sco = conn->link;
418 
419 	if (!sco)
420 		return;
421 
422 	BT_DBG("hcon %p", conn);
423 
424 	if (!status) {
425 		if (lmp_esco_capable(conn->hdev))
426 			hci_setup_sync(sco, conn->handle);
427 		else
428 			hci_add_sco(sco, conn->handle);
429 	} else {
430 		hci_connect_cfm(sco, status);
431 		hci_conn_del(sco);
432 	}
433 }
434 
435 static void hci_conn_timeout(struct work_struct *work)
436 {
437 	struct hci_conn *conn = container_of(work, struct hci_conn,
438 					     disc_work.work);
439 	int refcnt = atomic_read(&conn->refcnt);
440 
441 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
442 
443 	WARN_ON(refcnt < 0);
444 
445 	/* FIXME: It was observed that in a pairing-failed scenario the
446 	 * refcnt drops below 0. Probably this is because l2cap_conn_del
447 	 * calls l2cap_chan_del for each channel, and inside l2cap_chan_del
448 	 * conn is dropped. After that loop hci_chan_del is called, which
449 	 * also drops conn. For now keep the ACL alive if refcnt is higher
450 	 * than 0, otherwise drop it.
451 	 */
452 	if (refcnt > 0)
453 		return;
454 
455 	/* LE connections in scanning state need special handling */
456 	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
457 	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
458 		hci_connect_le_scan_remove(conn);
459 		return;
460 	}
461 
462 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
463 }
464 
465 /* Enter sniff mode */
466 static void hci_conn_idle(struct work_struct *work)
467 {
468 	struct hci_conn *conn = container_of(work, struct hci_conn,
469 					     idle_work.work);
470 	struct hci_dev *hdev = conn->hdev;
471 
472 	BT_DBG("hcon %p mode %d", conn, conn->mode);
473 
474 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
475 		return;
476 
477 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
478 		return;
479 
480 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
481 		struct hci_cp_sniff_subrate cp;
482 		cp.handle             = cpu_to_le16(conn->handle);
483 		cp.max_latency        = cpu_to_le16(0);
484 		cp.min_remote_timeout = cpu_to_le16(0);
485 		cp.min_local_timeout  = cpu_to_le16(0);
486 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
487 	}
488 
489 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
490 		struct hci_cp_sniff_mode cp;
491 		cp.handle       = cpu_to_le16(conn->handle);
492 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
493 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
494 		cp.attempt      = cpu_to_le16(4);
495 		cp.timeout      = cpu_to_le16(1);
496 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
497 	}
498 }
499 
500 static void hci_conn_auto_accept(struct work_struct *work)
501 {
502 	struct hci_conn *conn = container_of(work, struct hci_conn,
503 					     auto_accept_work.work);
504 
505 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
506 		     &conn->dst);
507 }
508 
509 static void le_disable_advertising(struct hci_dev *hdev)
510 {
511 	if (ext_adv_capable(hdev)) {
512 		struct hci_cp_le_set_ext_adv_enable cp;
513 
514 		cp.enable = 0x00;
515 		cp.num_of_sets = 0x00;
516 
517 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
518 			     &cp);
519 	} else {
520 		u8 enable = 0x00;
521 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
522 			     &enable);
523 	}
524 }
525 
526 static void le_conn_timeout(struct work_struct *work)
527 {
528 	struct hci_conn *conn = container_of(work, struct hci_conn,
529 					     le_conn_timeout.work);
530 	struct hci_dev *hdev = conn->hdev;
531 
532 	BT_DBG("");
533 
534 	/* We could end up here due to having done directed advertising,
535 	 * so clean up the state if necessary. This should however only
536 	 * happen with broken hardware or if low duty cycle was used
537 	 * (which doesn't have a timeout of its own).
538 	 */
539 	if (conn->role == HCI_ROLE_SLAVE) {
540 		/* Disable LE Advertising */
541 		le_disable_advertising(hdev);
542 		hci_dev_lock(hdev);
543 		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
544 		hci_dev_unlock(hdev);
545 		return;
546 	}
547 
548 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
549 }
550 
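/* Allocate and initialize a new hci_conn, add it to the connection
 * hash and take a reference to the hci_dev. The object starts in
 * BT_OPEN state with a refcount of zero; callers take ownership with
 * hci_conn_hold().
 */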
551 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
552 			      u8 role)
553 {
554 	struct hci_conn *conn;
555 
556 	BT_DBG("%s dst %pMR", hdev->name, dst);
557 
558 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
559 	if (!conn)
560 		return NULL;
561 
562 	bacpy(&conn->dst, dst);
563 	bacpy(&conn->src, &hdev->bdaddr);
564 	conn->hdev  = hdev;
565 	conn->type  = type;
566 	conn->role  = role;
567 	conn->mode  = HCI_CM_ACTIVE;
568 	conn->state = BT_OPEN;
569 	conn->auth_type = HCI_AT_GENERAL_BONDING;
570 	conn->io_capability = hdev->io_capability;
571 	conn->remote_auth = 0xff;
572 	conn->key_type = 0xff;
573 	conn->rssi = HCI_RSSI_INVALID;
574 	conn->tx_power = HCI_TX_POWER_INVALID;
575 	conn->max_tx_power = HCI_TX_POWER_INVALID;
576 
577 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
578 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
579 
580 	/* Set Default Authenticated payload timeout to 30s */
581 	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
582 
583 	if (conn->role == HCI_ROLE_MASTER)
584 		conn->out = true;
585 
586 	switch (type) {
587 	case ACL_LINK:
588 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
589 		break;
590 	case LE_LINK:
591 		/* conn->src should reflect the local identity address */
592 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
593 		break;
594 	case SCO_LINK:
595 		if (lmp_esco_capable(hdev))
596 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
597 					(hdev->esco_type & EDR_ESCO_MASK);
598 		else
599 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
600 		break;
601 	case ESCO_LINK:
602 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
603 		break;
604 	}
605 
606 	skb_queue_head_init(&conn->data_q);
607 
608 	INIT_LIST_HEAD(&conn->chan_list);
609 
610 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
611 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
612 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
613 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
614 	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
615 
616 	atomic_set(&conn->refcnt, 0);
617 
618 	hci_dev_hold(hdev);
619 
620 	hci_conn_hash_add(hdev, conn);
621 
622 	/* The SCO and eSCO connections will only be notified when their
623 	 * setup has been completed. This is different from ACL links,
624 	 * which can be notified right away.
625 	 */
626 	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
627 		if (hdev->notify)
628 			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
629 	}
630 
631 	hci_conn_init_sysfs(conn);
632 
633 	return conn;
634 }
635 
636 int hci_conn_del(struct hci_conn *conn)
637 {
638 	struct hci_dev *hdev = conn->hdev;
639 
640 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
641 
642 	cancel_delayed_work_sync(&conn->disc_work);
643 	cancel_delayed_work_sync(&conn->auto_accept_work);
644 	cancel_delayed_work_sync(&conn->idle_work);
645 
646 	if (conn->type == ACL_LINK) {
647 		struct hci_conn *sco = conn->link;
648 		if (sco)
649 			sco->link = NULL;
650 
651 		/* Unacked frames */
652 		hdev->acl_cnt += conn->sent;
653 	} else if (conn->type == LE_LINK) {
654 		cancel_delayed_work(&conn->le_conn_timeout);
655 
656 		if (hdev->le_pkts)
657 			hdev->le_cnt += conn->sent;
658 		else
659 			hdev->acl_cnt += conn->sent;
660 	} else {
661 		struct hci_conn *acl = conn->link;
662 		if (acl) {
663 			acl->link = NULL;
664 			hci_conn_drop(acl);
665 		}
666 	}
667 
668 	if (conn->amp_mgr)
669 		amp_mgr_put(conn->amp_mgr);
670 
671 	skb_queue_purge(&conn->data_q);
672 
673 	/* Remove the connection from the list and cleanup its remaining
674 	 * state. This is a separate function since for some cases like
675 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
676 	 * rest of hci_conn_del.
677 	 */
678 	hci_conn_cleanup(conn);
679 
680 	return 0;
681 }
682 
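/* Pick a controller for the given source/destination address pair.
 * With a wildcard source (BDADDR_ANY) the first powered-up primary
 * controller whose address differs from dst is used; otherwise the
 * controller whose (identity) address matches src. Returns a held
 * hci_dev reference or NULL.
 */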
683 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
684 {
685 	int use_src = bacmp(src, BDADDR_ANY);
686 	struct hci_dev *hdev = NULL, *d;
687 
688 	BT_DBG("%pMR -> %pMR", src, dst);
689 
690 	read_lock(&hci_dev_list_lock);
691 
692 	list_for_each_entry(d, &hci_dev_list, list) {
693 		if (!test_bit(HCI_UP, &d->flags) ||
694 		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
695 		    d->dev_type != HCI_PRIMARY)
696 			continue;
697 
698 		/* Simple routing:
699 		 *   No source address - find interface with bdaddr != dst
700 		 *   Source address    - find interface with bdaddr == src
701 		 */
702 
703 		if (use_src) {
704 			bdaddr_t id_addr;
705 			u8 id_addr_type;
706 
707 			if (src_type == BDADDR_BREDR) {
708 				if (!lmp_bredr_capable(d))
709 					continue;
710 				bacpy(&id_addr, &d->bdaddr);
711 				id_addr_type = BDADDR_BREDR;
712 			} else {
713 				if (!lmp_le_capable(d))
714 					continue;
715 
716 				hci_copy_identity_address(d, &id_addr,
717 							  &id_addr_type);
718 
719 				/* Convert from HCI to three-value type */
720 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
721 					id_addr_type = BDADDR_LE_PUBLIC;
722 				else
723 					id_addr_type = BDADDR_LE_RANDOM;
724 			}
725 
726 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
727 				hdev = d; break;
728 			}
729 		} else {
730 			if (bacmp(&d->bdaddr, dst)) {
731 				hdev = d; break;
732 			}
733 		}
734 	}
735 
736 	if (hdev)
737 		hdev = hci_dev_hold(hdev);
738 
739 	read_unlock(&hci_dev_list_lock);
740 	return hdev;
741 }
742 EXPORT_SYMBOL(hci_get_route);
743 
744 /* This function requires the caller holds hdev->lock */
745 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
746 {
747 	struct hci_dev *hdev = conn->hdev;
748 	struct hci_conn_params *params;
749 
750 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
751 					   conn->dst_type);
752 	if (params && params->conn) {
753 		hci_conn_drop(params->conn);
754 		hci_conn_put(params->conn);
755 		params->conn = NULL;
756 	}
757 
758 	conn->state = BT_CLOSED;
759 
760 	/* If the status indicates successful cancellation of
761 	 * the attempt (i.e. Unknown Connection Id) there's no point in
762 	 * notifying failure since we'll go back to keep trying to
763 	 * connect. The only exception is explicit connect requests
764 	 * where a timeout + cancel does indicate an actual failure.
765 	 */
766 	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
767 	    (params && params->explicit_connect))
768 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
769 				    conn->dst_type, status);
770 
771 	hci_connect_cfm(conn, status);
772 
773 	hci_conn_del(conn);
774 
775 	/* The suspend notifier is waiting for all devices to disconnect, and
776 	 * an LE connect cancel will result in a call to hci_le_conn_failed.
777 	 * Once the last connection is deleted, we should also wake the
778 	 * suspend queue to complete suspend operations.
779 	 */
780 	if (list_empty(&hdev->conn_hash.list) &&
781 	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
782 		wake_up(&hdev->suspend_wait_q);
783 	}
784 
785 	/* Since we may have temporarily stopped the background scanning in
786 	 * favor of connection establishment, we should restart it.
787 	 */
788 	hci_update_background_scan(hdev);
789 
790 	/* Re-enable advertising in case this was a failed connection
791 	 * attempt as a peripheral.
792 	 */
793 	hci_req_reenable_advertising(hdev);
794 }
795 
796 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
797 {
798 	struct hci_conn *conn;
799 
800 	hci_dev_lock(hdev);
801 
802 	conn = hci_lookup_le_connect(hdev);
803 
804 	if (hdev->adv_instance_cnt)
805 		hci_req_resume_adv_instances(hdev);
806 
807 	if (!status) {
808 		hci_connect_le_scan_cleanup(conn);
809 		goto done;
810 	}
811 
812 	bt_dev_err(hdev, "request failed to create LE connection: "
813 		   "status 0x%2.2x", status);
814 
815 	if (!conn)
816 		goto done;
817 
818 	hci_le_conn_failed(conn, status);
819 
820 done:
821 	hci_dev_unlock(hdev);
822 }
823 
824 static bool conn_use_rpa(struct hci_conn *conn)
825 {
826 	struct hci_dev *hdev = conn->hdev;
827 
828 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
829 }
830 
831 static void set_ext_conn_params(struct hci_conn *conn,
832 				struct hci_cp_le_ext_conn_param *p)
833 {
834 	struct hci_dev *hdev = conn->hdev;
835 
836 	memset(p, 0, sizeof(*p));
837 
838 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
839 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
840 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
841 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
842 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
843 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
844 	p->min_ce_len = cpu_to_le16(0x0000);
845 	p->max_ce_len = cpu_to_le16(0x0000);
846 }
847 
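/* Queue the HCI command that actually starts the LE connection
 * attempt: LE Extended Create Connection with one parameter block per
 * enabled PHY when the controller supports it, or the legacy LE Create
 * Connection command otherwise.
 */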
848 static void hci_req_add_le_create_conn(struct hci_request *req,
849 				       struct hci_conn *conn,
850 				       bdaddr_t *direct_rpa)
851 {
852 	struct hci_dev *hdev = conn->hdev;
853 	u8 own_addr_type;
854 
855 	/* If a direct address was provided, use it instead of the current
856 	 * address.
857 	 */
858 	if (direct_rpa) {
859 		if (bacmp(&req->hdev->random_addr, direct_rpa))
860 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
861 								direct_rpa);
862 
863 		/* direct address is always RPA */
864 		own_addr_type = ADDR_LE_DEV_RANDOM;
865 	} else {
866 		/* Update random address, but set require_privacy to false so
867 		 * that we never connect with an non-resolvable address.
868 		 */
869 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
870 					      &own_addr_type))
871 			return;
872 	}
873 
874 	if (use_ext_conn(hdev)) {
875 		struct hci_cp_le_ext_create_conn *cp;
876 		struct hci_cp_le_ext_conn_param *p;
877 		u8 data[sizeof(*cp) + sizeof(*p) * 3];
878 		u32 plen;
879 
880 		cp = (void *) data;
881 		p = (void *) cp->data;
882 
883 		memset(cp, 0, sizeof(*cp));
884 
885 		bacpy(&cp->peer_addr, &conn->dst);
886 		cp->peer_addr_type = conn->dst_type;
887 		cp->own_addr_type = own_addr_type;
888 
889 		plen = sizeof(*cp);
890 
891 		if (scan_1m(hdev)) {
892 			cp->phys |= LE_SCAN_PHY_1M;
893 			set_ext_conn_params(conn, p);
894 
895 			p++;
896 			plen += sizeof(*p);
897 		}
898 
899 		if (scan_2m(hdev)) {
900 			cp->phys |= LE_SCAN_PHY_2M;
901 			set_ext_conn_params(conn, p);
902 
903 			p++;
904 			plen += sizeof(*p);
905 		}
906 
907 		if (scan_coded(hdev)) {
908 			cp->phys |= LE_SCAN_PHY_CODED;
909 			set_ext_conn_params(conn, p);
910 
911 			plen += sizeof(*p);
912 		}
913 
914 		hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
915 
916 	} else {
917 		struct hci_cp_le_create_conn cp;
918 
919 		memset(&cp, 0, sizeof(cp));
920 
921 		cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
922 		cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
923 
924 		bacpy(&cp.peer_addr, &conn->dst);
925 		cp.peer_addr_type = conn->dst_type;
926 		cp.own_address_type = own_addr_type;
927 		cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
928 		cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
929 		cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
930 		cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
931 		cp.min_ce_len = cpu_to_le16(0x0000);
932 		cp.max_ce_len = cpu_to_le16(0x0000);
933 
934 		hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
935 	}
936 
937 	conn->state = BT_CONNECT;
938 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
939 }
940 
941 static void hci_req_directed_advertising(struct hci_request *req,
942 					 struct hci_conn *conn)
943 {
944 	struct hci_dev *hdev = req->hdev;
945 	u8 own_addr_type;
946 	u8 enable;
947 
948 	if (ext_adv_capable(hdev)) {
949 		struct hci_cp_le_set_ext_adv_params cp;
950 		bdaddr_t random_addr;
951 
952 		/* Set require_privacy to false so that the remote device has a
953 		 * chance of identifying us.
954 		 */
955 		if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
956 					   &own_addr_type, &random_addr) < 0)
957 			return;
958 
959 		memset(&cp, 0, sizeof(cp));
960 
961 		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
962 		cp.own_addr_type = own_addr_type;
963 		cp.channel_map = hdev->le_adv_channel_map;
964 		cp.tx_power = HCI_TX_POWER_INVALID;
965 		cp.primary_phy = HCI_ADV_PHY_1M;
966 		cp.secondary_phy = HCI_ADV_PHY_1M;
967 		cp.handle = 0; /* Use instance 0 for directed adv */
969 		cp.peer_addr_type = conn->dst_type;
970 		bacpy(&cp.peer_addr, &conn->dst);
971 
972 		/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
973 		 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does
974 		 * not support advertising data. When the advertising set
975 		 * already contains some, the controller shall return the
976 		 * error code 'Invalid HCI Command Parameters' (0x12). So the
977 		 * adv set for handle 0x00 must be removed first, since we
978 		 * use instance 0 for directed adv.
979 		 */
980 		__hci_req_remove_ext_adv_instance(req, cp.handle);
981 
982 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
983 
984 		if (own_addr_type == ADDR_LE_DEV_RANDOM &&
985 		    bacmp(&random_addr, BDADDR_ANY) &&
986 		    bacmp(&random_addr, &hdev->random_addr)) {
987 			struct hci_cp_le_set_adv_set_rand_addr cp;
988 
989 			memset(&cp, 0, sizeof(cp));
990 
991 			cp.handle = 0;
992 			bacpy(&cp.bdaddr, &random_addr);
993 
994 			hci_req_add(req,
995 				    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
996 				    sizeof(cp), &cp);
997 		}
998 
999 		__hci_req_enable_ext_advertising(req, 0x00);
1000 	} else {
1001 		struct hci_cp_le_set_adv_param cp;
1002 
1003 		/* Clear the HCI_LE_ADV bit temporarily so that the
1004 		 * hci_update_random_address knows that it's safe to go ahead
1005 		 * and write a new random address. The flag will be set back on
1006 		 * as soon as the SET_ADV_ENABLE HCI command completes.
1007 		 */
1008 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1009 
1010 		/* Set require_privacy to false so that the remote device has a
1011 		 * chance of identifying us.
1012 		 */
1013 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
1014 					      &own_addr_type) < 0)
1015 			return;
1016 
1017 		memset(&cp, 0, sizeof(cp));
1018 
1019 		/* Some controllers might reject command if intervals are not
1020 		 * within range for undirected advertising.
1021 		 * BCM20702A0 is known to be affected by this.
1022 		 */
1023 		cp.min_interval = cpu_to_le16(0x0020);
1024 		cp.max_interval = cpu_to_le16(0x0020);
1025 
1026 		cp.type = LE_ADV_DIRECT_IND;
1027 		cp.own_address_type = own_addr_type;
1028 		cp.direct_addr_type = conn->dst_type;
1029 		bacpy(&cp.direct_addr, &conn->dst);
1030 		cp.channel_map = hdev->le_adv_channel_map;
1031 
1032 		hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1033 
1034 		enable = 0x01;
1035 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1036 			    &enable);
1037 	}
1038 
1039 	conn->state = BT_CONNECT;
1040 }
1041 
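/* Directly initiate an LE connection, or directed advertising when
 * connecting in the peripheral role. Since controllers handle only one
 * LE connection attempt at a time, this fails with -EBUSY while
 * another attempt is in progress. Called with hdev->lock held.
 */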
1042 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1043 				u8 dst_type, u8 sec_level, u16 conn_timeout,
1044 				u8 role, bdaddr_t *direct_rpa)
1045 {
1046 	struct hci_conn_params *params;
1047 	struct hci_conn *conn;
1048 	struct smp_irk *irk;
1049 	struct hci_request req;
1050 	int err;
1051 
1052 	/* This ensures that address resolution will not be disabled when
1053 	 * the le_scan disable is followed by le_create_conn
1054 	 */
1055 	bool rpa_le_conn = true;
1056 
1057 	/* Let's make sure that LE is enabled. */
1058 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1059 		if (lmp_le_capable(hdev))
1060 			return ERR_PTR(-ECONNREFUSED);
1061 
1062 		return ERR_PTR(-EOPNOTSUPP);
1063 	}
1064 
1065 	/* Since the controller supports only one LE connection attempt at a
1066 	 * time, we return -EBUSY if there is any connection attempt running.
1067 	 */
1068 	if (hci_lookup_le_connect(hdev))
1069 		return ERR_PTR(-EBUSY);
1070 
1071 	/* If there's already a connection object but it's not in
1072 	 * scanning state it means it must already be established, in
1073 	 * which case we can't do anything else except report a failure
1074 	 * to connect.
1075 	 */
1076 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1077 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1078 		return ERR_PTR(-EBUSY);
1079 	}
1080 
1081 	/* When given an identity address with existing identity
1082 	 * resolving key, the connection needs to be established
1083 	 * to a resolvable random address.
1084 	 *
1085 	 * Storing the resolvable random address is required here
1086 	 * to handle connection failures. The address will later
1087 	 * be resolved back into the original identity address
1088 	 * from the connect request.
1089 	 */
1090 	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1091 	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1092 		dst = &irk->rpa;
1093 		dst_type = ADDR_LE_DEV_RANDOM;
1094 	}
1095 
1096 	if (conn) {
1097 		bacpy(&conn->dst, dst);
1098 	} else {
1099 		conn = hci_conn_add(hdev, LE_LINK, dst, role);
1100 		if (!conn)
1101 			return ERR_PTR(-ENOMEM);
1102 		hci_conn_hold(conn);
1103 		conn->pending_sec_level = sec_level;
1104 	}
1105 
1106 	conn->dst_type = dst_type;
1107 	conn->sec_level = BT_SECURITY_LOW;
1108 	conn->conn_timeout = conn_timeout;
1109 
1110 	hci_req_init(&req, hdev);
1111 
1112 	/* Disable advertising if we're active. For central role
1113 	 * connections most controllers will refuse to connect if
1114 	 * advertising is enabled, and for peripheral role connections we
1115 	 * anyway have to disable it in order to start directed
1116 	 * advertising. Any registered advertisements will be
1117 	 * re-enabled after the connection attempt is finished.
1118 	 */
1119 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1120 		__hci_req_pause_adv_instances(&req);
1121 
1122 	/* If requested to connect as peripheral use directed advertising */
1123 	if (conn->role == HCI_ROLE_SLAVE) {
1124 		/* If we're active scanning most controllers are unable
1125 		 * to initiate advertising. Simply reject the attempt.
1126 		 */
1127 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1128 		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
1129 			hci_req_purge(&req);
1130 			hci_conn_del(conn);
1131 			return ERR_PTR(-EBUSY);
1132 		}
1133 
1134 		hci_req_directed_advertising(&req, conn);
1135 		goto create_conn;
1136 	}
1137 
1138 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1139 	if (params) {
1140 		conn->le_conn_min_interval = params->conn_min_interval;
1141 		conn->le_conn_max_interval = params->conn_max_interval;
1142 		conn->le_conn_latency = params->conn_latency;
1143 		conn->le_supv_timeout = params->supervision_timeout;
1144 	} else {
1145 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
1146 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
1147 		conn->le_conn_latency = hdev->le_conn_latency;
1148 		conn->le_supv_timeout = hdev->le_supv_timeout;
1149 	}
1150 
1151 	/* If controller is scanning, we stop it since some controllers are
1152 	 * not able to scan and connect at the same time. Also set the
1153 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
1154 	 * handler for scan disabling knows to set the correct discovery
1155 	 * state.
1156 	 */
1157 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1158 		hci_req_add_le_scan_disable(&req, rpa_le_conn);
1159 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1160 	}
1161 
1162 	hci_req_add_le_create_conn(&req, conn, direct_rpa);
1163 
1164 create_conn:
1165 	err = hci_req_run(&req, create_le_conn_complete);
1166 	if (err) {
1167 		hci_conn_del(conn);
1168 
1169 		if (hdev->adv_instance_cnt)
1170 			hci_req_resume_adv_instances(hdev);
1171 
1172 		return ERR_PTR(err);
1173 	}
1174 
1175 	return conn;
1176 }
1177 
1178 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1179 {
1180 	struct hci_conn *conn;
1181 
1182 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1183 	if (!conn)
1184 		return false;
1185 
1186 	if (conn->state != BT_CONNECTED)
1187 		return false;
1188 
1189 	return true;
1190 }
1191 
1192 /* This function requires the caller holds hdev->lock */
1193 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1194 					bdaddr_t *addr, u8 addr_type)
1195 {
1196 	struct hci_conn_params *params;
1197 
1198 	if (is_connected(hdev, addr, addr_type))
1199 		return -EISCONN;
1200 
1201 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1202 	if (!params) {
1203 		params = hci_conn_params_add(hdev, addr, addr_type);
1204 		if (!params)
1205 			return -ENOMEM;
1206 
1207 		/* If we created new params, mark them to be deleted in
1208 		 * hci_connect_le_scan_cleanup. It's a different case from
1209 		 * existing disabled params; those will stay after cleanup.
1210 		 */
1211 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1212 	}
1213 
1214 	/* We're trying to connect, so make sure params are at pend_le_conns */
1215 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1216 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1217 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1218 		list_del_init(&params->action);
1219 		list_add(&params->action, &hdev->pend_le_conns);
1220 	}
1221 
1222 	params->explicit_connect = true;
1223 
1224 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1225 	       params->auto_connect);
1226 
1227 	return 0;
1228 }
1229 
1230 /* This function requires the caller holds hdev->lock */
1231 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1232 				     u8 dst_type, u8 sec_level,
1233 				     u16 conn_timeout,
1234 				     enum conn_reasons conn_reason)
1235 {
1236 	struct hci_conn *conn;
1237 
1238 	/* Let's make sure that LE is enabled. */
1239 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1240 		if (lmp_le_capable(hdev))
1241 			return ERR_PTR(-ECONNREFUSED);
1242 
1243 		return ERR_PTR(-EOPNOTSUPP);
1244 	}
1245 
1246 	/* Some devices send ATT messages as soon as the physical link is
1247 	 * established. To be able to handle these ATT messages, user
1248 	 * space first establishes the connection and then starts the pairing
1249 	 * process.
1250 	 *
1251 	 * So if a hci_conn object already exists for the following connection
1252 	 * attempt, we simply update pending_sec_level and auth_type fields
1253 	 * and return the object found.
1254 	 */
1255 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1256 	if (conn) {
1257 		if (conn->pending_sec_level < sec_level)
1258 			conn->pending_sec_level = sec_level;
1259 		goto done;
1260 	}
1261 
1262 	BT_DBG("requesting refresh of dst_addr");
1263 
1264 	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1265 	if (!conn)
1266 		return ERR_PTR(-ENOMEM);
1267 
1268 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1269 		hci_conn_del(conn);
1270 		return ERR_PTR(-EBUSY);
1271 	}
1272 
1273 	conn->state = BT_CONNECT;
1274 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1275 	conn->dst_type = dst_type;
1276 	conn->sec_level = BT_SECURITY_LOW;
1277 	conn->pending_sec_level = sec_level;
1278 	conn->conn_timeout = conn_timeout;
1279 	conn->conn_reason = conn_reason;
1280 
1281 	hci_update_background_scan(hdev);
1282 
1283 done:
1284 	hci_conn_hold(conn);
1285 	return conn;
1286 }
1287 
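/* Set up (or reuse) an outgoing BR/EDR ACL connection. An existing
 * hci_conn is re-dialed only when it is in BT_OPEN or BT_CLOSED state;
 * the returned connection is held for the caller.
 */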
1288 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1289 				 u8 sec_level, u8 auth_type,
1290 				 enum conn_reasons conn_reason)
1291 {
1292 	struct hci_conn *acl;
1293 
1294 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1295 		if (lmp_bredr_capable(hdev))
1296 			return ERR_PTR(-ECONNREFUSED);
1297 
1298 		return ERR_PTR(-EOPNOTSUPP);
1299 	}
1300 
1301 	/* Reject an outgoing connection to a device with the same BD_ADDR,
1302 	 * guarding against CVE-2020-26555
1303 	 */
1304 	if (!bacmp(&hdev->bdaddr, dst)) {
1305 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1306 			   dst);
1307 		return ERR_PTR(-ECONNREFUSED);
1308 	}
1309 
1310 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1311 	if (!acl) {
1312 		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1313 		if (!acl)
1314 			return ERR_PTR(-ENOMEM);
1315 	}
1316 
1317 	hci_conn_hold(acl);
1318 
1319 	acl->conn_reason = conn_reason;
1320 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1321 		acl->sec_level = BT_SECURITY_LOW;
1322 		acl->pending_sec_level = sec_level;
1323 		acl->auth_type = auth_type;
1324 		hci_acl_create_connection(acl);
1325 	}
1326 
1327 	return acl;
1328 }
1329 
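/* Establish a SCO/eSCO link on top of an ACL connection to the same
 * peer, creating the ACL first if needed. The synchronous setup is
 * deferred until the ACL is connected and any pending mode change has
 * completed (see hci_sco_setup()).
 */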
1330 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1331 				 __u16 setting)
1332 {
1333 	struct hci_conn *acl;
1334 	struct hci_conn *sco;
1335 
1336 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1337 			      CONN_REASON_SCO_CONNECT);
1338 	if (IS_ERR(acl))
1339 		return acl;
1340 
1341 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1342 	if (!sco) {
1343 		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1344 		if (!sco) {
1345 			hci_conn_drop(acl);
1346 			return ERR_PTR(-ENOMEM);
1347 		}
1348 	}
1349 
1350 	acl->link = sco;
1351 	sco->link = acl;
1352 
1353 	hci_conn_hold(sco);
1354 
1355 	sco->setting = setting;
1356 
1357 	if (acl->state == BT_CONNECTED &&
1358 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1359 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1360 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1361 
1362 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1363 			/* defer SCO setup until mode change completed */
1364 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1365 			return sco;
1366 		}
1367 
1368 		hci_sco_setup(acl, 0x00);
1369 	}
1370 
1371 	return sco;
1372 }
1373 
1374 /* Check link security requirement */
1375 int hci_conn_check_link_mode(struct hci_conn *conn)
1376 {
1377 	BT_DBG("hcon %p", conn);
1378 
1379 	/* In Secure Connections Only mode, it is required that Secure
1380 	 * Connections is used and the link is encrypted with AES-CCM
1381 	 * using a P-256 authenticated combination key.
1382 	 */
1383 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1384 		if (!hci_conn_sc_enabled(conn) ||
1385 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1386 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1387 			return 0;
1388 	}
1389 
1390 	 /* AES encryption is required for Level 4:
1391 	  *
1392 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
1393 	  * page 1319:
1394 	  *
1395 	  * 128-bit equivalent strength for link and encryption keys
1396 	  * required using FIPS approved algorithms (E0 not allowed,
1397 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
1398 	  * not shortened)
1399 	  */
1400 	if (conn->sec_level == BT_SECURITY_FIPS &&
1401 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
1402 		bt_dev_err(conn->hdev,
1403 			   "Invalid security: Missing AES-CCM usage");
1404 		return 0;
1405 	}
1406 
1407 	if (hci_conn_ssp_enabled(conn) &&
1408 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1409 		return 0;
1410 
1411 	return 1;
1412 }
1413 
1414 /* Authenticate remote device */
1415 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1416 {
1417 	BT_DBG("hcon %p", conn);
1418 
1419 	if (conn->pending_sec_level > sec_level)
1420 		sec_level = conn->pending_sec_level;
1421 
1422 	if (sec_level > conn->sec_level)
1423 		conn->pending_sec_level = sec_level;
1424 	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1425 		return 1;
1426 
1427 	/* Make sure we preserve an existing MITM requirement */
1428 	auth_type |= (conn->auth_type & 0x01);
1429 
1430 	conn->auth_type = auth_type;
1431 
1432 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1433 		struct hci_cp_auth_requested cp;
1434 
1435 		cp.handle = cpu_to_le16(conn->handle);
1436 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1437 			     sizeof(cp), &cp);
1438 
1439 		/* Set the ENCRYPT_PEND to trigger encryption after
1440 		 * authentication.
1441 		 */
1442 		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1443 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1444 	}
1445 
1446 	return 0;
1447 }
1448 
1449 /* Encrypt the link */
1450 static void hci_conn_encrypt(struct hci_conn *conn)
1451 {
1452 	BT_DBG("hcon %p", conn);
1453 
1454 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1455 		struct hci_cp_set_conn_encrypt cp;
1456 		cp.handle  = cpu_to_le16(conn->handle);
1457 		cp.encrypt = 0x01;
1458 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1459 			     &cp);
1460 	}
1461 }
1462 
1463 /* Enable security */
1464 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1465 		      bool initiator)
1466 {
1467 	BT_DBG("hcon %p", conn);
1468 
1469 	if (conn->type == LE_LINK)
1470 		return smp_conn_security(conn, sec_level);
1471 
1472 	/* For sdp we don't need the link key. */
1473 	if (sec_level == BT_SECURITY_SDP)
1474 		return 1;
1475 
1476 	/* For non-2.1 devices and a low security level we don't need the
1477 	 * link key. */
1478 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1479 		return 1;
1480 
1481 	/* For other security levels we need the link key. */
1482 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1483 		goto auth;
1484 
1485 	switch (conn->key_type) {
1486 	case HCI_LK_AUTH_COMBINATION_P256:
1487 		/* An authenticated FIPS approved combination key has
1488 		 * sufficient security for security level 4 or lower.
1489 		 */
1490 		if (sec_level <= BT_SECURITY_FIPS)
1491 			goto encrypt;
1492 		break;
1493 	case HCI_LK_AUTH_COMBINATION_P192:
1494 		/* An authenticated combination key has sufficient security for
1495 		 * security level 3 or lower.
1496 		 */
1497 		if (sec_level <= BT_SECURITY_HIGH)
1498 			goto encrypt;
1499 		break;
1500 	case HCI_LK_UNAUTH_COMBINATION_P192:
1501 	case HCI_LK_UNAUTH_COMBINATION_P256:
1502 		/* An unauthenticated combination key has sufficient security
1503 		 * for security level 2 or lower.
1504 		 */
1505 		if (sec_level <= BT_SECURITY_MEDIUM)
1506 			goto encrypt;
1507 		break;
1508 	case HCI_LK_COMBINATION:
1509 		/* A combination key always has sufficient security for
1510 		 * security level 2 or lower. A high security level requires
1511 		 * the combination key to be generated using the maximum PIN
1512 		 * code length (16). This applies to pre-2.1 units.
1513 		 */
1514 		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
1515 			goto encrypt;
1516 		break;
1517 	default:
1518 		break;
1519 	}
1520 
1521 auth:
1522 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1523 		return 0;
1524 
1525 	if (initiator)
1526 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1527 
1528 	if (!hci_conn_auth(conn, sec_level, auth_type))
1529 		return 0;
1530 
1531 encrypt:
1532 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
1533 		/* Ensure that the encryption key size has been read,
1534 		 * otherwise stall the upper layer responses.
1535 		 */
1536 		if (!conn->enc_key_size)
1537 			return 0;
1538 
1539 		/* Nothing else needed, all requirements are met */
1540 		return 1;
1541 	}
1542 
1543 	hci_conn_encrypt(conn);
1544 	return 0;
1545 }
1546 EXPORT_SYMBOL(hci_conn_security);
1547 
1548 /* Check secure link requirement */
1549 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1550 {
1551 	BT_DBG("hcon %p", conn);
1552 
1553 	/* Accept if non-secure or higher security level is required */
1554 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1555 		return 1;
1556 
1557 	/* Accept if secure or higher security level is already present */
1558 	if (conn->sec_level == BT_SECURITY_HIGH ||
1559 	    conn->sec_level == BT_SECURITY_FIPS)
1560 		return 1;
1561 
1562 	/* Reject not secure link */
1563 	return 0;
1564 }
1565 EXPORT_SYMBOL(hci_conn_check_secure);
1566 
1567 /* Switch role */
1568 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1569 {
1570 	BT_DBG("hcon %p", conn);
1571 
1572 	if (role == conn->role)
1573 		return 1;
1574 
1575 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1576 		struct hci_cp_switch_role cp;
1577 		bacpy(&cp.bdaddr, &conn->dst);
1578 		cp.role = role;
1579 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1580 	}
1581 
1582 	return 0;
1583 }
1584 EXPORT_SYMBOL(hci_conn_switch_role);
1585 
1586 /* Enter active mode */
1587 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1588 {
1589 	struct hci_dev *hdev = conn->hdev;
1590 
1591 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1592 
1593 	if (conn->mode != HCI_CM_SNIFF)
1594 		goto timer;
1595 
1596 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1597 		goto timer;
1598 
1599 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1600 		struct hci_cp_exit_sniff_mode cp;
1601 		cp.handle = cpu_to_le16(conn->handle);
1602 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1603 	}
1604 
1605 timer:
1606 	if (hdev->idle_timeout > 0)
1607 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1608 				   msecs_to_jiffies(hdev->idle_timeout));
1609 }
1610 
1611 /* Drop all connections on the device */
1612 void hci_conn_hash_flush(struct hci_dev *hdev)
1613 {
1614 	struct hci_conn_hash *h = &hdev->conn_hash;
1615 	struct hci_conn *c, *n;
1616 
1617 	BT_DBG("hdev %s", hdev->name);
1618 
1619 	list_for_each_entry_safe(c, n, &h->list, list) {
1620 		c->state = BT_CLOSED;
1621 
1622 		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1623 		hci_conn_del(c);
1624 	}
1625 }
1626 
1627 /* Check pending connect attempts */
1628 void hci_conn_check_pending(struct hci_dev *hdev)
1629 {
1630 	struct hci_conn *conn;
1631 
1632 	BT_DBG("hdev %s", hdev->name);
1633 
1634 	hci_dev_lock(hdev);
1635 
1636 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1637 	if (conn)
1638 		hci_acl_create_connection(conn);
1639 
1640 	hci_dev_unlock(hdev);
1641 }
1642 
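/* Translate conn->role and conn->flags into the HCI_LM_* bitmask
 * reported through the legacy ioctl interface below.
 */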
1643 static u32 get_link_mode(struct hci_conn *conn)
1644 {
1645 	u32 link_mode = 0;
1646 
1647 	if (conn->role == HCI_ROLE_MASTER)
1648 		link_mode |= HCI_LM_MASTER;
1649 
1650 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1651 		link_mode |= HCI_LM_ENCRYPT;
1652 
1653 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1654 		link_mode |= HCI_LM_AUTH;
1655 
1656 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1657 		link_mode |= HCI_LM_SECURE;
1658 
1659 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1660 		link_mode |= HCI_LM_FIPS;
1661 
1662 	return link_mode;
1663 }
1664 
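/* Handler for the HCIGETCONNLIST ioctl: copy a snapshot of the
 * connection hash into the user-supplied hci_conn_list_req buffer.
 */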
1665 int hci_get_conn_list(void __user *arg)
1666 {
1667 	struct hci_conn *c;
1668 	struct hci_conn_list_req req, *cl;
1669 	struct hci_conn_info *ci;
1670 	struct hci_dev *hdev;
1671 	int n = 0, size, err;
1672 
1673 	if (copy_from_user(&req, arg, sizeof(req)))
1674 		return -EFAULT;
1675 
1676 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1677 		return -EINVAL;
1678 
1679 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1680 
1681 	cl = kmalloc(size, GFP_KERNEL);
1682 	if (!cl)
1683 		return -ENOMEM;
1684 
1685 	hdev = hci_dev_get(req.dev_id);
1686 	if (!hdev) {
1687 		kfree(cl);
1688 		return -ENODEV;
1689 	}
1690 
1691 	ci = cl->conn_info;
1692 
1693 	hci_dev_lock(hdev);
1694 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1695 		bacpy(&(ci + n)->bdaddr, &c->dst);
1696 		(ci + n)->handle = c->handle;
1697 		(ci + n)->type  = c->type;
1698 		(ci + n)->out   = c->out;
1699 		(ci + n)->state = c->state;
1700 		(ci + n)->link_mode = get_link_mode(c);
1701 		if (++n >= req.conn_num)
1702 			break;
1703 	}
1704 	hci_dev_unlock(hdev);
1705 
1706 	cl->dev_id = hdev->id;
1707 	cl->conn_num = n;
1708 	size = sizeof(req) + n * sizeof(*ci);
1709 
1710 	hci_dev_put(hdev);
1711 
1712 	err = copy_to_user(arg, cl, size);
1713 	kfree(cl);
1714 
1715 	return err ? -EFAULT : 0;
1716 }
1717 
1718 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1719 {
1720 	struct hci_conn_info_req req;
1721 	struct hci_conn_info ci;
1722 	struct hci_conn *conn;
1723 	char __user *ptr = arg + sizeof(req);
1724 
1725 	if (copy_from_user(&req, arg, sizeof(req)))
1726 		return -EFAULT;
1727 
1728 	hci_dev_lock(hdev);
1729 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1730 	if (conn) {
1731 		bacpy(&ci.bdaddr, &conn->dst);
1732 		ci.handle = conn->handle;
1733 		ci.type  = conn->type;
1734 		ci.out   = conn->out;
1735 		ci.state = conn->state;
1736 		ci.link_mode = get_link_mode(conn);
1737 	}
1738 	hci_dev_unlock(hdev);
1739 
1740 	if (!conn)
1741 		return -ENOENT;
1742 
1743 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1744 }
1745 
1746 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1747 {
1748 	struct hci_auth_info_req req;
1749 	struct hci_conn *conn;
1750 
1751 	if (copy_from_user(&req, arg, sizeof(req)))
1752 		return -EFAULT;
1753 
1754 	hci_dev_lock(hdev);
1755 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1756 	if (conn)
1757 		req.type = conn->auth_type;
1758 	hci_dev_unlock(hdev);
1759 
1760 	if (!conn)
1761 		return -ENOENT;
1762 
1763 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1764 }
1765 
1766 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1767 {
1768 	struct hci_dev *hdev = conn->hdev;
1769 	struct hci_chan *chan;
1770 
1771 	BT_DBG("%s hcon %p", hdev->name, conn);
1772 
1773 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1774 		BT_DBG("Refusing to create new hci_chan");
1775 		return NULL;
1776 	}
1777 
1778 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1779 	if (!chan)
1780 		return NULL;
1781 
1782 	chan->conn = hci_conn_get(conn);
1783 	skb_queue_head_init(&chan->data_q);
1784 	chan->state = BT_CONNECTED;
1785 
1786 	list_add_rcu(&chan->list, &conn->chan_list);
1787 
1788 	return chan;
1789 }
1790 
1791 void hci_chan_del(struct hci_chan *chan)
1792 {
1793 	struct hci_conn *conn = chan->conn;
1794 	struct hci_dev *hdev = conn->hdev;
1795 
1796 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1797 
1798 	list_del_rcu(&chan->list);
1799 
1800 	synchronize_rcu();
1801 
1802 	/* Prevent new hci_chans from being created for this hci_conn */
1803 	set_bit(HCI_CONN_DROP, &conn->flags);
1804 
1805 	hci_conn_put(conn);
1806 
1807 	skb_queue_purge(&chan->data_q);
1808 	kfree(chan);
1809 }
1810 
1811 void hci_chan_list_flush(struct hci_conn *conn)
1812 {
1813 	struct hci_chan *chan, *n;
1814 
1815 	BT_DBG("hcon %p", conn);
1816 
1817 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1818 		hci_chan_del(chan);
1819 }
1820 
1821 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1822 						 __u16 handle)
1823 {
1824 	struct hci_chan *hchan;
1825 
1826 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1827 		if (hchan->handle == handle)
1828 			return hchan;
1829 	}
1830 
1831 	return NULL;
1832 }
1833 
1834 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1835 {
1836 	struct hci_conn_hash *h = &hdev->conn_hash;
1837 	struct hci_conn *hcon;
1838 	struct hci_chan *hchan = NULL;
1839 
1840 	rcu_read_lock();
1841 
1842 	list_for_each_entry_rcu(hcon, &h->list, list) {
1843 		hchan = __hci_chan_lookup_handle(hcon, handle);
1844 		if (hchan)
1845 			break;
1846 	}
1847 
1848 	rcu_read_unlock();
1849 
1850 	return hchan;
1851 }
1852 
1853 u32 hci_conn_get_phy(struct hci_conn *conn)
1854 {
1855 	u32 phys = 0;
1856 
1857 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
1858 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
1859 	 * CPB logical transport types.
1860 	 */
1861 	switch (conn->type) {
1862 	case SCO_LINK:
1863 		/* SCO logical transport (1 Mb/s):
1864 		 * HV1, HV2, HV3 and DV.
1865 		 */
1866 		phys |= BT_PHY_BR_1M_1SLOT;
1867 
1868 		break;
1869 
1870 	case ACL_LINK:
1871 		/* ACL logical transport (1 Mb/s) ptt=0:
1872 		 * DH1, DM3, DH3, DM5 and DH5.
1873 		 */
1874 		phys |= BT_PHY_BR_1M_1SLOT;
1875 
1876 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
1877 			phys |= BT_PHY_BR_1M_3SLOT;
1878 
1879 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
1880 			phys |= BT_PHY_BR_1M_5SLOT;
1881 
1882 		/* ACL logical transport (2 Mb/s) ptt=1:
1883 		 * 2-DH1, 2-DH3 and 2-DH5.
1884 		 */
1885 		if (!(conn->pkt_type & HCI_2DH1))
1886 			phys |= BT_PHY_EDR_2M_1SLOT;
1887 
1888 		if (!(conn->pkt_type & HCI_2DH3))
1889 			phys |= BT_PHY_EDR_2M_3SLOT;
1890 
1891 		if (!(conn->pkt_type & HCI_2DH5))
1892 			phys |= BT_PHY_EDR_2M_5SLOT;
1893 
1894 		/* ACL logical transport (3 Mb/s) ptt=1:
1895 		 * 3-DH1, 3-DH3 and 3-DH5.
1896 		 */
1897 		if (!(conn->pkt_type & HCI_3DH1))
1898 			phys |= BT_PHY_EDR_3M_1SLOT;
1899 
1900 		if (!(conn->pkt_type & HCI_3DH3))
1901 			phys |= BT_PHY_EDR_3M_3SLOT;
1902 
1903 		if (!(conn->pkt_type & HCI_3DH5))
1904 			phys |= BT_PHY_EDR_3M_5SLOT;
1905 
1906 		break;
1907 
1908 	case ESCO_LINK:
1909 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
1910 		phys |= BT_PHY_BR_1M_1SLOT;
1911 
1912 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
1913 			phys |= BT_PHY_BR_1M_3SLOT;
1914 
1915 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
1916 		if (!(conn->pkt_type & ESCO_2EV3))
1917 			phys |= BT_PHY_EDR_2M_1SLOT;
1918 
1919 		if (!(conn->pkt_type & ESCO_2EV5))
1920 			phys |= BT_PHY_EDR_2M_3SLOT;
1921 
1922 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
1923 		if (!(conn->pkt_type & ESCO_3EV3))
1924 			phys |= BT_PHY_EDR_3M_1SLOT;
1925 
1926 		if (!(conn->pkt_type & ESCO_3EV5))
1927 			phys |= BT_PHY_EDR_3M_3SLOT;
1928 
1929 		break;
1930 
1931 	case LE_LINK:
1932 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
1933 			phys |= BT_PHY_LE_1M_TX;
1934 
1935 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
1936 			phys |= BT_PHY_LE_1M_RX;
1937 
1938 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
1939 			phys |= BT_PHY_LE_2M_TX;
1940 
1941 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
1942 			phys |= BT_PHY_LE_2M_RX;
1943 
1944 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
1945 			phys |= BT_PHY_LE_CODED_TX;
1946 
1947 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
1948 			phys |= BT_PHY_LE_CODED_RX;
1949 
1950 		break;
1951 	}
1952 
1953 	return phys;
1954 }
1955