1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/iso.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "a2mp.h"
39 #include "eir.h"
40
/* Parameter set for one (e)SCO connection attempt; successive attempts
 * walk the esco_param_*/sco_param_* tables below from best to worst.
 */
struct sco_param {
	u16 pkt_type;		/* Allowed (e)SCO packet types bitmask */
	u16 max_latency;	/* Maximum latency in ms (0xffff = don't care) */
	u8  retrans_effort;	/* Retransmission effort (0xff = don't care) */
};
46
/* Pair of connection and handle, heap-allocated so it can be passed as the
 * opaque data argument to hci_cmd_sync_queue() (freed by the callback).
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
51
/* CVSD over eSCO: parameter sets tried in order, one per conn->attempt */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
59
/* CVSD over plain SCO (controller without eSCO support) */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
64
/* mSBC (transparent air mode) over eSCO */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69
70 /* This function requires the caller holds hdev->lock */
hci_connect_le_scan_cleanup(struct hci_conn * conn,u8 status)71 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73 struct hci_conn_params *params;
74 struct hci_dev *hdev = conn->hdev;
75 struct smp_irk *irk;
76 bdaddr_t *bdaddr;
77 u8 bdaddr_type;
78
79 bdaddr = &conn->dst;
80 bdaddr_type = conn->dst_type;
81
82 /* Check if we need to convert to identity address */
83 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84 if (irk) {
85 bdaddr = &irk->bdaddr;
86 bdaddr_type = irk->addr_type;
87 }
88
89 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90 bdaddr_type);
91 if (!params)
92 return;
93
94 if (params->conn) {
95 hci_conn_drop(params->conn);
96 hci_conn_put(params->conn);
97 params->conn = NULL;
98 }
99
100 if (!params->explicit_connect)
101 return;
102
103 /* If the status indicates successful cancellation of
104 * the attempt (i.e. Unknown Connection Id) there's no point of
105 * notifying failure since we'll go back to keep trying to
106 * connect. The only exception is explicit connect requests
107 * where a timeout + cancel does indicate an actual failure.
108 */
109 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110 mgmt_connect_failed(hdev, &conn->dst, conn->type,
111 conn->dst_type, status);
112
113 /* The connection attempt was doing scan for new RPA, and is
114 * in scan phase. If params are not associated with any other
115 * autoconnect action, remove them completely. If they are, just unmark
116 * them as waiting for connection, by clearing explicit_connect field.
117 */
118 params->explicit_connect = false;
119
120 hci_pend_le_list_del_init(params);
121
122 switch (params->auto_connect) {
123 case HCI_AUTO_CONN_EXPLICIT:
124 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125 /* return instead of break to avoid duplicate scan update */
126 return;
127 case HCI_AUTO_CONN_DIRECT:
128 case HCI_AUTO_CONN_ALWAYS:
129 hci_pend_le_list_add(params, &hdev->pend_le_conns);
130 break;
131 case HCI_AUTO_CONN_REPORT:
132 hci_pend_le_list_add(params, &hdev->pend_le_reports);
133 break;
134 default:
135 break;
136 }
137
138 hci_update_passive_scan(hdev);
139 }
140
/* Tear down per-connection state: remove the connection from the hash,
 * run the link-type specific cleanup, notify the driver and release the
 * hdev reference taken in hci_conn_add().
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Run type-specific cleanup (e.g. bis_cleanup/cis_cleanup for ISO) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	/* Balances the hci_dev_hold() in hci_conn_add() */
	hci_dev_put(hdev);
}
177
/* Deferred cleanup for an LE connect-by-scan attempt, scheduled by
 * hci_connect_le_scan_remove(). Runs outside hdev->lock so it can take
 * it safely; drops the temporary hdev/conn references taken when
 * scheduling.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	if (c == conn) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
206
/* Abort an LE connection attempt that is still in the scanning phase by
 * deferring the actual teardown to the le_scan_cleanup work item.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
228
/* Issue HCI Create Connection for an outgoing ACL link, seeding page-scan
 * parameters from the inquiry cache when a fresh entry exists.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Reuse cached page-scan data if the inquiry entry is recent enough */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* 0x8000 marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
286
/* Initiate disconnection of a connection with the given HCI reason code.
 * Returns the result of hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
308
/* Issue legacy HCI Add SCO Connection on top of the ACL link identified
 * by @handle (used when the controller lacks eSCO support).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
326
find_next_esco_param(struct hci_conn * conn,const struct sco_param * esco_param,int size)327 static bool find_next_esco_param(struct hci_conn *conn,
328 const struct sco_param *esco_param, int size)
329 {
330 for (; conn->attempt <= size; conn->attempt++) {
331 if (lmp_esco_2m_capable(conn->link) ||
332 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
333 break;
334 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
335 conn, conn->attempt);
336 }
337
338 return conn->attempt <= size;
339 }
340
/* Configure the controller's offload data path (both directions) for the
 * given codec.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the return status of the direction 0x00 (input) command is
 * deliberately not checked; only the direction 0x01 (output) result is
 * propagated.
 */
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* Fetch vendor-specific codec configuration from the driver */
	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:

	kfree(cmd);
	kfree(vnd_data);
	return err;
}
379
hci_enhanced_setup_sync(struct hci_dev * hdev,void * data)380 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
381 {
382 struct conn_handle_t *conn_handle = data;
383 struct hci_conn *conn = conn_handle->conn;
384 __u16 handle = conn_handle->handle;
385 struct hci_cp_enhanced_setup_sync_conn cp;
386 const struct sco_param *param;
387
388 kfree(conn_handle);
389
390 bt_dev_dbg(hdev, "hcon %p", conn);
391
392 /* for offload use case, codec needs to configured before opening SCO */
393 if (conn->codec.data_path)
394 configure_datapath_sync(hdev, &conn->codec);
395
396 conn->state = BT_CONNECT;
397 conn->out = true;
398
399 conn->attempt++;
400
401 memset(&cp, 0x00, sizeof(cp));
402
403 cp.handle = cpu_to_le16(handle);
404
405 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
406 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
407
408 switch (conn->codec.id) {
409 case BT_CODEC_MSBC:
410 if (!find_next_esco_param(conn, esco_param_msbc,
411 ARRAY_SIZE(esco_param_msbc)))
412 return -EINVAL;
413
414 param = &esco_param_msbc[conn->attempt - 1];
415 cp.tx_coding_format.id = 0x05;
416 cp.rx_coding_format.id = 0x05;
417 cp.tx_codec_frame_size = __cpu_to_le16(60);
418 cp.rx_codec_frame_size = __cpu_to_le16(60);
419 cp.in_bandwidth = __cpu_to_le32(32000);
420 cp.out_bandwidth = __cpu_to_le32(32000);
421 cp.in_coding_format.id = 0x04;
422 cp.out_coding_format.id = 0x04;
423 cp.in_coded_data_size = __cpu_to_le16(16);
424 cp.out_coded_data_size = __cpu_to_le16(16);
425 cp.in_pcm_data_format = 2;
426 cp.out_pcm_data_format = 2;
427 cp.in_pcm_sample_payload_msb_pos = 0;
428 cp.out_pcm_sample_payload_msb_pos = 0;
429 cp.in_data_path = conn->codec.data_path;
430 cp.out_data_path = conn->codec.data_path;
431 cp.in_transport_unit_size = 1;
432 cp.out_transport_unit_size = 1;
433 break;
434
435 case BT_CODEC_TRANSPARENT:
436 if (!find_next_esco_param(conn, esco_param_msbc,
437 ARRAY_SIZE(esco_param_msbc)))
438 return false;
439 param = &esco_param_msbc[conn->attempt - 1];
440 cp.tx_coding_format.id = 0x03;
441 cp.rx_coding_format.id = 0x03;
442 cp.tx_codec_frame_size = __cpu_to_le16(60);
443 cp.rx_codec_frame_size = __cpu_to_le16(60);
444 cp.in_bandwidth = __cpu_to_le32(0x1f40);
445 cp.out_bandwidth = __cpu_to_le32(0x1f40);
446 cp.in_coding_format.id = 0x03;
447 cp.out_coding_format.id = 0x03;
448 cp.in_coded_data_size = __cpu_to_le16(16);
449 cp.out_coded_data_size = __cpu_to_le16(16);
450 cp.in_pcm_data_format = 2;
451 cp.out_pcm_data_format = 2;
452 cp.in_pcm_sample_payload_msb_pos = 0;
453 cp.out_pcm_sample_payload_msb_pos = 0;
454 cp.in_data_path = conn->codec.data_path;
455 cp.out_data_path = conn->codec.data_path;
456 cp.in_transport_unit_size = 1;
457 cp.out_transport_unit_size = 1;
458 break;
459
460 case BT_CODEC_CVSD:
461 if (lmp_esco_capable(conn->link)) {
462 if (!find_next_esco_param(conn, esco_param_cvsd,
463 ARRAY_SIZE(esco_param_cvsd)))
464 return -EINVAL;
465 param = &esco_param_cvsd[conn->attempt - 1];
466 } else {
467 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
468 return -EINVAL;
469 param = &sco_param_cvsd[conn->attempt - 1];
470 }
471 cp.tx_coding_format.id = 2;
472 cp.rx_coding_format.id = 2;
473 cp.tx_codec_frame_size = __cpu_to_le16(60);
474 cp.rx_codec_frame_size = __cpu_to_le16(60);
475 cp.in_bandwidth = __cpu_to_le32(16000);
476 cp.out_bandwidth = __cpu_to_le32(16000);
477 cp.in_coding_format.id = 4;
478 cp.out_coding_format.id = 4;
479 cp.in_coded_data_size = __cpu_to_le16(16);
480 cp.out_coded_data_size = __cpu_to_le16(16);
481 cp.in_pcm_data_format = 2;
482 cp.out_pcm_data_format = 2;
483 cp.in_pcm_sample_payload_msb_pos = 0;
484 cp.out_pcm_sample_payload_msb_pos = 0;
485 cp.in_data_path = conn->codec.data_path;
486 cp.out_data_path = conn->codec.data_path;
487 cp.in_transport_unit_size = 16;
488 cp.out_transport_unit_size = 16;
489 break;
490 default:
491 return -EINVAL;
492 }
493
494 cp.retrans_effort = param->retrans_effort;
495 cp.pkt_type = __cpu_to_le16(param->pkt_type);
496 cp.max_latency = __cpu_to_le16(param->max_latency);
497
498 if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
499 return -EIO;
500
501 return 0;
502 }
503
/* Issue legacy HCI Setup Synchronous Connection using the air-mode from
 * conn->setting to select the parameter table.  Returns true if the
 * command was queued, false if no usable parameters remain or sending
 * failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
555
hci_setup_sync(struct hci_conn * conn,__u16 handle)556 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
557 {
558 int result;
559 struct conn_handle_t *conn_handle;
560
561 if (enhanced_sync_conn_capable(conn->hdev)) {
562 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
563
564 if (!conn_handle)
565 return false;
566
567 conn_handle->conn = conn;
568 conn_handle->handle = handle;
569 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
570 conn_handle, NULL);
571 if (result < 0)
572 kfree(conn_handle);
573
574 return result == 0;
575 }
576
577 return hci_setup_sync_conn(conn, handle);
578 }
579
/* Send HCI LE Connection Update with the given interval/latency/timeout,
 * mirroring the values into any stored connection parameters.
 * Returns 0x01 if stored parameters existed and were updated, 0x00
 * otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	/* Keep the stored parameters in sync with what we request */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
615
/* Start LE link-layer encryption with the given EDIV/Rand/LTK.  Only the
 * first @key_size bytes of @ltk are copied; the rest of the command LTK
 * field stays zeroed by the memset.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
633
634 /* Device _must_ be locked */
hci_sco_setup(struct hci_conn * conn,__u8 status)635 void hci_sco_setup(struct hci_conn *conn, __u8 status)
636 {
637 struct hci_conn *sco = conn->link;
638
639 if (!sco)
640 return;
641
642 BT_DBG("hcon %p", conn);
643
644 if (!status) {
645 if (lmp_esco_capable(conn->hdev))
646 hci_setup_sync(sco, conn->handle);
647 else
648 hci_add_sco(sco, conn->handle);
649 } else {
650 hci_connect_cfm(sco, status);
651 hci_conn_del(sco);
652 }
653 }
654
/* Delayed-work handler fired when a connection's disconnect timeout
 * expires; aborts the connection if nothing holds a reference anymore.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
684
685 /* Enter sniff mode */
hci_conn_idle(struct work_struct * work)686 static void hci_conn_idle(struct work_struct *work)
687 {
688 struct hci_conn *conn = container_of(work, struct hci_conn,
689 idle_work.work);
690 struct hci_dev *hdev = conn->hdev;
691
692 BT_DBG("hcon %p mode %d", conn, conn->mode);
693
694 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
695 return;
696
697 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
698 return;
699
700 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
701 struct hci_cp_sniff_subrate cp;
702 cp.handle = cpu_to_le16(conn->handle);
703 cp.max_latency = cpu_to_le16(0);
704 cp.min_remote_timeout = cpu_to_le16(0);
705 cp.min_local_timeout = cpu_to_le16(0);
706 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
707 }
708
709 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
710 struct hci_cp_sniff_mode cp;
711 cp.handle = cpu_to_le16(conn->handle);
712 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
713 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
714 cp.attempt = cpu_to_le16(4);
715 cp.timeout = cpu_to_le16(1);
716 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
717 }
718 }
719
/* Delayed-work handler that auto-confirms a pending user-confirmation
 * request for this connection's remote address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
728
/* Disable LE advertising, using the extended advertising command when the
 * controller supports it and the legacy command otherwise.
 */
static void le_disable_advertising(struct hci_dev *hdev)
{
	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		cp.num_of_sets = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			     &cp);
	} else {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
	}
}
745
/* Delayed-work handler for an LE connection attempt that timed out;
 * cleans up directed-advertising state for the peripheral role, otherwise
 * aborts the outgoing connection.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
770
/* LE Set CIG Parameters command plus room for the maximum number of CIS
 * entries (0x1f per the command definition).
 */
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};
775
/* Scratch data passed to the hci_conn_hash_list_state() iterators below;
 * identifies a CIG or BIG (and optionally a CIS/BIS or PA sync handle)
 * and accumulates a match count.
 */
struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;
	struct iso_cig_params pdu;
};
789
bis_list(struct hci_conn * conn,void * data)790 static void bis_list(struct hci_conn *conn, void *data)
791 {
792 struct iso_list_data *d = data;
793
794 /* Skip if not broadcast/ANY address */
795 if (bacmp(&conn->dst, BDADDR_ANY))
796 return;
797
798 if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
799 d->bis != conn->iso_qos.bis)
800 return;
801
802 d->count++;
803 }
804
find_bis(struct hci_conn * conn,void * data)805 static void find_bis(struct hci_conn *conn, void *data)
806 {
807 struct iso_list_data *d = data;
808
809 /* Ignore unicast */
810 if (bacmp(&conn->dst, BDADDR_ANY))
811 return;
812
813 d->count++;
814 }
815
/* hci_cmd_sync callback: remove the advertising instance used by the BIS
 * and terminate the BIG if no other BIS connection still uses it.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
834
/* Destroy callback for the BIG-terminate sync commands: frees the
 * iso_list_data allocated by the queueing function.
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
839
/* Queue termination of the given BIG/BIS on the cmd-sync workqueue.
 * The allocated iso_list_data is freed by terminate_big_destroy() (or
 * here if queueing fails).  Returns 0 on success or a negative errno.
 */
static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);

	/* kzalloc replaces the kmalloc+memset pair: zeroed on allocation */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->bis = bis;

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
862
/* hci_cmd_sync callback for the broadcast-receiver side: terminate the
 * BIG sync (if no other BIS connection uses it) and then the PA sync.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
881
/* Queue BIG-sync/PA-sync termination on the cmd-sync workqueue for the
 * broadcast-receiver side.  The allocated iso_list_data is freed by
 * terminate_big_destroy() (or here if queueing fails).  Returns 0 on
 * success or a negative errno.
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);

	/* kzalloc replaces the kmalloc+memset pair: zeroed on allocation */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = sync_handle;

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
904
/* Cleanup BIS connection
 *
 * Detects if there are any BIS connections left in a BIG.
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
/* Per-connection cleanup hook for broadcast ISO (BIS) connections,
 * installed by hci_conn_add() and invoked from hci_conn_cleanup().
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		/* Only tear down if periodic advertising was active */
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.big,
				     conn->iso_qos.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.big,
				     conn->sync_handle);
	}
}
928
/* hci_cmd_sync callback: remove the CIG whose handle was smuggled in the
 * data pointer via ERR_PTR() by hci_le_remove_cig().
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
935
/* Queue removal of the given CIG on the cmd-sync workqueue.  The handle
 * is encoded into the data pointer with ERR_PTR() to avoid an allocation.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
942
find_cis(struct hci_conn * conn,void * data)943 static void find_cis(struct hci_conn *conn, void *data)
944 {
945 struct iso_list_data *d = data;
946
947 /* Ignore broadcast */
948 if (!bacmp(&conn->dst, BDADDR_ANY))
949 return;
950
951 d->count++;
952 }
953
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS connections left in a CIG and removes the
 * CIG when none remain.
 */
/* Per-connection cleanup hook for unicast ISO (CIS) connections,
 * installed by hci_conn_add() and invoked from hci_conn_cleanup().
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.cig);
}
977
/* Allocate and initialize a new hci_conn of the given link @type to @dst,
 * register it in the connection hash and sysfs, and take a reference on
 * @hdev (released in hci_conn_cleanup()).  Returns the new connection or
 * NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* Handle is assigned later, when the controller reports it */
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Link-type specific defaults */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	/* Balanced by hci_dev_put() in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1074
hci_conn_unlink(struct hci_conn * conn)1075 static bool hci_conn_unlink(struct hci_conn *conn)
1076 {
1077 if (!conn->link)
1078 return false;
1079
1080 conn->link->link = NULL;
1081 conn->link = NULL;
1082
1083 return true;
1084 }
1085
/* Tear down a connection object: cancel pending work, detach any linked
 * connection, return unacknowledged packet credits to the controller
 * counters and remove the connection from the hash. Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *link = conn->link;

		if (link) {
			hci_conn_unlink(conn);
			/* Due to race, SCO connection might be not established
			 * yet at this point. Delete it now, otherwise it is
			 * possible for it to be stuck and can't be deleted.
			 */
			if (link->handle == HCI_CONN_HANDLE_UNSET)
				hci_conn_del(link);
		}

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* Unacked frames go back to the LE pool when the controller
		 * has one, otherwise to the shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;

		if (acl) {
			hci_conn_unlink(conn);
			/* Drop the ACL reference taken when this secondary
			 * link was created.
			 */
			hci_conn_drop(acl);
		}

		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
1151
/* Select an HCI device able to reach @dst: with a specific @src, the
 * device whose (identity) address matches it; with BDADDR_ANY as @src,
 * any usable device whose own address differs from @dst. A reference is
 * taken on the returned device; NULL when no device qualifies.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	struct hci_dev *hdev = NULL, *d;
	bool have_src = bacmp(src, BDADDR_ANY);

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		bdaddr_t id_addr;
		u8 id_addr_type;

		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */
		if (!have_src) {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d;
				break;
			}
			continue;
		}

		if (src_type == BDADDR_BREDR) {
			if (!lmp_bredr_capable(d))
				continue;
			bacpy(&id_addr, &d->bdaddr);
			id_addr_type = BDADDR_BREDR;
		} else {
			if (!lmp_le_capable(d))
				continue;

			hci_copy_identity_address(d, &id_addr,
						  &id_addr_type);

			/* Convert from HCI to three-value type */
			id_addr_type = (id_addr_type == ADDR_LE_DEV_PUBLIC) ?
					BDADDR_LE_PUBLIC : BDADDR_LE_RANDOM;
		}

		if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
			hdev = d;
			break;
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1212
1213 /* This function requires the caller holds hdev->lock */
hci_le_conn_failed(struct hci_conn * conn,u8 status)1214 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1215 {
1216 struct hci_dev *hdev = conn->hdev;
1217
1218 hci_connect_le_scan_cleanup(conn, status);
1219
1220 /* Enable advertising in case this was a failed connection
1221 * attempt as a peripheral.
1222 */
1223 hci_enable_advertising(hdev);
1224 }
1225
1226 /* This function requires the caller holds hdev->lock */
hci_conn_failed(struct hci_conn * conn,u8 status)1227 void hci_conn_failed(struct hci_conn *conn, u8 status)
1228 {
1229 struct hci_dev *hdev = conn->hdev;
1230
1231 bt_dev_dbg(hdev, "status 0x%2.2x", status);
1232
1233 switch (conn->type) {
1234 case LE_LINK:
1235 hci_le_conn_failed(conn, status);
1236 break;
1237 case ACL_LINK:
1238 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1239 conn->dst_type, status);
1240 break;
1241 }
1242
1243 conn->state = BT_CLOSED;
1244 hci_connect_cfm(conn, status);
1245 hci_conn_del(conn);
1246 }
1247
/* Completion callback for the queued LE create connection request.
 * On success only the scan state is cleaned up; on failure the
 * connection is failed, but only if it is still the pending one.
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err)
		hci_connect_le_scan_cleanup(conn, 0x00);
	else if (conn == hci_lookup_le_connect(hdev))
		hci_conn_failed(conn, bt_status(err));

	hci_dev_unlock(hdev);
}
1270
/* hci_cmd_sync callback: issue the LE create connection request */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "conn %p", data);

	return hci_le_create_conn_sync(hdev, data);
}
1279
/* Initiate a direct LE connection to @dst. A connection object still in
 * scanning state is reused, otherwise a new one is created, then the LE
 * create connection request is queued. Returns the (held) connection or
 * an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		/* Reused scanning connection: refresh its destination only */
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1358
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)1359 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1360 {
1361 struct hci_conn *conn;
1362
1363 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1364 if (!conn)
1365 return false;
1366
1367 if (conn->state != BT_CONNECTED)
1368 return false;
1369
1370 return true;
1371 }
1372
/* Ensure connection parameters for @addr exist and are queued on
 * pend_le_conns so an explicit (user-requested) connection is attempted
 * when the device advertises. Returns 0, -EISCONN when already
 * connected, or -ENOMEM.
 */
/* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1410
/* Pick a free BIG handle (0x00-0xEE) when @qos does not specify one,
 * skipping identifiers already used by a bound ISO connection.
 * Returns 0 or -EADDRNOTAVAIL when all identifiers are taken.
 */
static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate a BIG if not set */
	if (qos->big == BT_ISO_QOS_BIG_UNSET) {
		for (data.big = 0x00; data.big < 0xef; data.big++) {
			data.count = 0;
			data.bis = 0xff;

			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			/* No bound connection uses this BIG: take it */
			if (!data.count)
				break;
		}

		if (data.big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->big = data.big;
	}

	return 0;
}
1436
/* Pick a free BIS/advertising set identifier when @qos does not specify
 * one, skipping sets already used by a bound ISO connection. Returns 0
 * or -EADDRNOTAVAIL when all advertising sets are taken.
 */
static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate BIS if not set */
	if (qos->bis == BT_ISO_QOS_BIS_UNSET) {
		/* Find an unused adv set to advertise BIS, skip instance 0x00
		 * since it is reserved as general purpose set.
		 */
		for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
		     data.bis++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			/* No bound connection uses this set: take it */
			if (!data.count)
				break;
		}

		if (data.bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bis = data.bis;
	}

	return 0;
}
1465
/* Create a connection object for an outgoing broadcast (BIS), after
 * allocating BIG/BIS identifiers and checking they are not in use.
 * Returns the held connection or an ERR_PTR on failure.
 */
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Allocate BIG and BIS identifiers when not supplied by QoS */
	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->big;
	data.bis = qos->bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Broadcast uses periodic advertising */
	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1512
/* Arrange for an LE connection to @dst to be established via passive
 * scanning once the device is seen advertising. Reuses an existing
 * connection object if present. Returns the held connection or an
 * ERR_PTR on failure.
 */
/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick passive scanning so the pending connection can be made */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1570
/* Look up or create an ACL connection towards @dst and start the
 * Create Connection procedure when the connection is idle (BT_OPEN or
 * BT_CLOSED). Returns the held connection or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1612
/* Set up a SCO/eSCO connection towards @dst: first ensure an ACL
 * connection exists, then look up or create the SCO connection, link
 * the two and, when the ACL is ready, start SCO setup. Returns the held
 * SCO connection or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and SCO connections */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1657
cis_add(struct iso_list_data * d,struct bt_iso_qos * qos)1658 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1659 {
1660 struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1661
1662 cis->cis_id = qos->cis;
1663 cis->c_sdu = cpu_to_le16(qos->out.sdu);
1664 cis->p_sdu = cpu_to_le16(qos->in.sdu);
1665 cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy;
1666 cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy;
1667 cis->c_rtn = qos->out.rtn;
1668 cis->p_rtn = qos->in.rtn;
1669
1670 d->pdu.cp.num_cis++;
1671 }
1672
/* Connection-hash iterator callback: count every CIS connection
 * matching the CIG/CIS identifiers in @data and, while a CIG PDU is
 * being built, collect its parameters into that PDU.
 */
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if broadcast/ANY address */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.cis)
		return;

	d->count++;

	/* Stop collecting when no CIG id has been set in the PDU or when
	 * the PDU is already full.
	 */
	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
1693
/* Send the HCI_OP_LE_CREATE_BIG command for a single BIS using the
 * output direction of @qos. Encryption is disabled and the broadcast
 * code left zeroed.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;

	memset(&cp, 0, sizeof(cp));

	cp.handle = qos->big;
	cp.adv_handle = qos->bis;
	cp.num_bis = 0x01;
	hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->out.sdu);
	cp.bis.latency = cpu_to_le16(qos->out.latency);
	cp.bis.rtn = qos->out.rtn;
	cp.bis.phy = qos->out.phy;
	cp.bis.packing = qos->packing;
	cp.bis.framing = qos->framing;
	cp.bis.encryption = 0x00;
	memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1716
/* Completion callback for set_cig_params_sync: log failures and free
 * the PDU allocated by hci_le_set_cig_params().
 */
static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to set CIG parameters: %d", err);

	kfree(data);
}
1728
set_cig_params_sync(struct hci_dev * hdev,void * data)1729 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1730 {
1731 struct iso_cig_params *pdu = data;
1732 u32 plen;
1733
1734 plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]);
1735 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu,
1736 HCI_CMD_TIMEOUT);
1737 }
1738
/* Program the CIG parameters for @conn: allocate a CIG and/or CIS
 * identifier when unset, collect the CIS entries of the CIG into one
 * HCI_OP_LE_SET_CIG_PARAMS PDU and queue it. Returns false when no
 * identifier could be allocated, the CIS is already bound, or queueing
 * fails.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;
	struct iso_cig_params *pdu;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			/* A CIG id is free only when neither a bound nor a
			 * connected CIS uses it.
			 */
			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->cig = data.cig;
	}

	data.pdu.cp.cig_id = qos->cig;
	hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->sca;
	data.pdu.cp.packing = qos->packing;
	data.pdu.cp.framing = qos->framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency);

	/* Explicit CIS requested: fail if it is already bound */
	if (qos->cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->cig;
		data.cis = qos->cis;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	/* The PDU is freed by set_cig_params_complete() */
	pdu = kzalloc(sizeof(*pdu), GFP_KERNEL);
	if (!pdu)
		return false;

	memcpy(pdu, &data.pdu, sizeof(*pdu));

	if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu,
			       set_cig_params_complete) < 0) {
		kfree(pdu);
		return false;
	}

	return true;
}
1828
/* Look up or create a CIS connection towards @dst, fill in missing QoS
 * values from the opposite direction and program the CIG parameters.
 * Returns the CIS (possibly already connected or bound with matching
 * QoS) or an ERR_PTR on failure.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->out.phy;
	cis->le_rx_phy = qos->in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->out.interval)
		qos->out.interval = qos->in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->in.interval)
		qos->in.interval = qos->out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->out.latency)
		qos->out.latency = qos->in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->in.latency)
		qos->in.latency = qos->out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1889
hci_iso_setup_path(struct hci_conn * conn)1890 bool hci_iso_setup_path(struct hci_conn *conn)
1891 {
1892 struct hci_dev *hdev = conn->hdev;
1893 struct hci_cp_le_setup_iso_path cmd;
1894
1895 memset(&cmd, 0, sizeof(cmd));
1896
1897 if (conn->iso_qos.out.sdu) {
1898 cmd.handle = cpu_to_le16(conn->handle);
1899 cmd.direction = 0x00; /* Input (Host to Controller) */
1900 cmd.path = 0x00; /* HCI path if enabled */
1901 cmd.codec = 0x03; /* Transparent Data */
1902
1903 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1904 &cmd) < 0)
1905 return false;
1906 }
1907
1908 if (conn->iso_qos.in.sdu) {
1909 cmd.handle = cpu_to_le16(conn->handle);
1910 cmd.direction = 0x01; /* Output (Controller to Host) */
1911 cmd.path = 0x00; /* HCI path if enabled */
1912 cmd.codec = 0x03; /* Transparent Data */
1913
1914 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1915 &cmd) < 0)
1916 return false;
1917 }
1918
1919 return true;
1920 }
1921
/* Build and send a single HCI_OP_LE_CREATE_CIS command covering every
 * CIS of the same CIG that is ready (state BT_CONNECT with a connected
 * ACL). If any CIS of the CIG is not ready yet, nothing is sent.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	struct {
		struct hci_cp_le_create_cis cp;
		struct hci_cis cis[0x1f];
	} cmd;
	struct hci_conn *conn = data;
	u8 cig;

	memset(&cmd, 0, sizeof(cmd));
	/* First entry is the CIS that triggered this command */
	cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle);
	cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
	cmd.cp.num_cis++;
	cig = conn->iso_qos.cig;

	hci_dev_lock(hdev);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];

		if (conn == data || conn->type != ISO_LINK ||
		    conn->state == BT_CONNECTED || conn->iso_qos.cig != cig)
			continue;

		/* Check if all CIS(s) belonging to a CIG are ready */
		if (!conn->link || conn->link->state != BT_CONNECTED ||
		    conn->state != BT_CONNECT) {
			cmd.cp.num_cis = 0;
			break;
		}

		/* Group all CIS with state BT_CONNECT since the spec don't
		 * allow to send them individually:
		 *
		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 2566:
		 *
		 * If the Host issues this command before all the
		 * HCI_LE_CIS_Established events from the previous use of the
		 * command have been generated, the Controller shall return the
		 * error code Command Disallowed (0x0C).
		 */
		cis->acl_handle = cpu_to_le16(conn->link->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		cmd.cp.num_cis++;
	}

	rcu_read_unlock();

	hci_dev_unlock(hdev);

	/* Not all CIS were ready - defer sending the command */
	if (!cmd.cp.num_cis)
		return 0;

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) +
			    sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd);
}
1981
hci_le_create_cis(struct hci_conn * conn)1982 int hci_le_create_cis(struct hci_conn *conn)
1983 {
1984 struct hci_conn *cis;
1985 struct hci_dev *hdev = conn->hdev;
1986 int err;
1987
1988 switch (conn->type) {
1989 case LE_LINK:
1990 if (!conn->link || conn->state != BT_CONNECTED)
1991 return -EINVAL;
1992 cis = conn->link;
1993 break;
1994 case ISO_LINK:
1995 cis = conn;
1996 break;
1997 default:
1998 return -EINVAL;
1999 }
2000
2001 if (cis->state == BT_CONNECT)
2002 return 0;
2003
2004 /* Queue Create CIS */
2005 err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
2006 if (err)
2007 return err;
2008
2009 cis->state = BT_CONNECT;
2010
2011 return 0;
2012 }
2013
/* Fill in unset fields of one ISO QoS direction: default the SDU size
 * from the device MTUs, the PHY from the ACL PHY and interval/latency
 * from the LE ACL connection parameters.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy) {
		if (hdev->iso_mtu > 0)
			qos->sdu = hdev->iso_mtu;
		else
			qos->sdu = hdev->le_mtu > 0 ? hdev->le_mtu
						    : hdev->acl_mtu;
	}

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set
	 * (ACL interval unit in 1.25 ms to us)
	 */
	if (!qos->interval)
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
2040
hci_bind_bis(struct hci_conn * conn,struct bt_iso_qos * qos)2041 static void hci_bind_bis(struct hci_conn *conn,
2042 struct bt_iso_qos *qos)
2043 {
2044 /* Update LINK PHYs according to QoS preference */
2045 conn->le_tx_phy = qos->out.phy;
2046 conn->le_tx_phy = qos->out.phy;
2047 conn->iso_qos = *qos;
2048 conn->state = BT_BOUND;
2049 }
2050
/* hci_cmd_sync callback: start periodic advertising for the BIS and
 * then issue the LE Create BIG command based on the connection's QoS.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals: SDU interval is in us, advertising interval
	 * unit is 1.25 ms.
	 */
	interval = qos->out.interval / 1250;

	if (qos->bis)
		sync_interval = qos->sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2076
/* Completion callback for create_pa_sync: log failures and free the
 * command allocated by hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2088
create_pa_sync(struct hci_dev * hdev,void * data)2089 static int create_pa_sync(struct hci_dev *hdev, void *data)
2090 {
2091 struct hci_cp_le_pa_create_sync *cp = data;
2092 int err;
2093
2094 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2095 sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2096 if (err) {
2097 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2098 return err;
2099 }
2100
2101 return hci_update_passive_scan_sync(hdev);
2102 }
2103
/* Queue establishment of periodic advertising sync to @dst. Only one PA
 * sync may be pending at a time (guarded by the HCI_PA_SYNC flag).
 * Returns 0 when queued, -EBUSY when one is already pending or -ENOMEM.
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kmalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	/* Convert from ISO socket address type to HCI address type */
	if (dst_type == BDADDR_LE_PUBLIC)
		dst_type = ADDR_LE_DEV_PUBLIC;
	else
		dst_type = ADDR_LE_DEV_RANDOM;

	memset(cp, 0, sizeof(*cp));
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);

	/* Queue start pa_create_sync and scan; cp is freed by the
	 * completion callback.
	 */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2132
/* Send an HCI_OP_LE_BIG_CREATE_SYNC command to synchronize to up to
 * @num_bis BIS of the periodic train identified by @sync_handle,
 * allocating a BIG handle first when @qos does not specify one.
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	/* NOTE(review): '_packed' here is a struct *tag*, not a packing
	 * attribute, so this aggregate has normal layout - confirm that
	 * is intended.
	 */
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8 bis[0x11];
	} pdu;
	int err;

	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	/* Only the first num_bis entries of the bis array are sent */
	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2158
/* Completion callback for the queued Create BIG work: on failure, notify
 * the upper layer and dispose of the broadcast connection object.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);

	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2171
/* Create a BIG (Broadcast Isochronous Group) as broadcaster.
 *
 * Allocates a broadcast hci_conn, binds it to @qos, optionally embeds a
 * BASE into the periodic advertising data and queues the work that starts
 * periodic advertising and then creates the BIG.
 *
 * Returns the new hci_conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	hci_bind_bis(conn, qos);

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		/* 0x1851: Basic Audio Announcement Service UUID */
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	/* Prefer an already negotiated TX PHY, otherwise fall back to the
	 * controller's default TX PHYs.
	 */
	hci_iso_qos_setup(hdev, conn, &qos->out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
2207
/* Create a CIS (Connected Isochronous Stream) towards @dst.
 *
 * First establishes (or reuses) the underlying LE link: while advertising
 * a peripheral-role direct connection is used, otherwise the link is
 * created via connect-by-scan.  The CIS hci_conn is then bound on top of
 * the LE link and, if the link is already up with a valid CIS handle,
 * Create CIS is issued immediately.
 *
 * Returns the CIS hci_conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Program TX/RX QoS, preferring already negotiated PHYs over the
	 * controller defaults.
	 */
	hci_iso_qos_setup(hdev, le, &qos->out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Cross-link the LE ACL and CIS connection objects */
	le->link = cis;
	cis->link = le;

	hci_conn_hold(cis);

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(le);

	return cis;
}
2251
2252 /* Check link security requirement */
hci_conn_check_link_mode(struct hci_conn * conn)2253 int hci_conn_check_link_mode(struct hci_conn *conn)
2254 {
2255 BT_DBG("hcon %p", conn);
2256
2257 /* In Secure Connections Only mode, it is required that Secure
2258 * Connections is used and the link is encrypted with AES-CCM
2259 * using a P-256 authenticated combination key.
2260 */
2261 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2262 if (!hci_conn_sc_enabled(conn) ||
2263 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2264 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2265 return 0;
2266 }
2267
2268 /* AES encryption is required for Level 4:
2269 *
2270 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2271 * page 1319:
2272 *
2273 * 128-bit equivalent strength for link and encryption keys
2274 * required using FIPS approved algorithms (E0 not allowed,
2275 * SAFER+ not allowed, and P-192 not allowed; encryption key
2276 * not shortened)
2277 */
2278 if (conn->sec_level == BT_SECURITY_FIPS &&
2279 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2280 bt_dev_err(conn->hdev,
2281 "Invalid security: Missing AES-CCM usage");
2282 return 0;
2283 }
2284
2285 if (hci_conn_ssp_enabled(conn) &&
2286 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2287 return 0;
2288
2289 return 1;
2290 }
2291
/* Authenticate remote device.
 *
 * Returns 1 when the link is already authenticated at a sufficient level,
 * 0 when authentication was requested (or is already pending).
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a security level that is already pending */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	/* Raise the pending level, or bail out early when the link is
	 * already authenticated and no higher level is wanted.
	 */
	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	/* Only send Authentication Requested once per pending request */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2326
/* Encrypt the link.  Completion is reported asynchronously via the
 * encryption change HCI event.
 */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* Only send Set Connection Encryption once while a request is
	 * outstanding.
	 */
	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
2340
/* Enable security.
 *
 * Raise the link security to @sec_level, authenticating and/or encrypting
 * as needed.  Returns 1 when all requirements are already met, 0 when a
 * procedure was started (completion is signalled asynchronously via HCI
 * events) or cannot proceed yet.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links delegate security to SMP */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* Decide whether the existing key is strong enough for @sec_level */
	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key has always sufficient security for the
		 * security levels 2 or lower. High security level requires the
		 * combination key is generated using maximum PIN code length
		 * (16). For pre 2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	/* Don't start authentication while encryption setup is pending */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2425
2426 /* Check secure link requirement */
hci_conn_check_secure(struct hci_conn * conn,__u8 sec_level)2427 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2428 {
2429 BT_DBG("hcon %p", conn);
2430
2431 /* Accept if non-secure or higher security level is required */
2432 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2433 return 1;
2434
2435 /* Accept if secure or higher security level is already present */
2436 if (conn->sec_level == BT_SECURITY_HIGH ||
2437 conn->sec_level == BT_SECURITY_FIPS)
2438 return 1;
2439
2440 /* Reject not secure link */
2441 return 0;
2442 }
2443 EXPORT_SYMBOL(hci_conn_check_secure);
2444
2445 /* Switch role */
hci_conn_switch_role(struct hci_conn * conn,__u8 role)2446 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2447 {
2448 BT_DBG("hcon %p", conn);
2449
2450 if (role == conn->role)
2451 return 1;
2452
2453 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2454 struct hci_cp_switch_role cp;
2455 bacpy(&cp.bdaddr, &conn->dst);
2456 cp.role = role;
2457 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2458 }
2459
2460 return 0;
2461 }
2462 EXPORT_SYMBOL(hci_conn_switch_role);
2463
/* Enter active mode.
 *
 * Exit sniff mode when the connection is in it (and power-save is enabled
 * on the link or @force_active is set), then (re)arm the idle timer if
 * one is configured on the device.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only issue Exit Sniff Mode once while a mode change is pending */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
2488
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe iteration: hci_conn_del() removes entries from this list */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		/* Notify upper layers of the local-host-initiated teardown */
		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);

		/* Unlink before deleting otherwise it is possible that
		 * hci_conn_del removes the link which may cause the list to
		 * contain items already freed.
		 */
		hci_conn_unlink(c);
		hci_conn_del(c);
	}
}
2510
/* Check pending connect attempts.
 *
 * Picks one ACL connection parked in BT_CONNECT2 and (re)issues Create
 * Connection for it.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
2526
get_link_mode(struct hci_conn * conn)2527 static u32 get_link_mode(struct hci_conn *conn)
2528 {
2529 u32 link_mode = 0;
2530
2531 if (conn->role == HCI_ROLE_MASTER)
2532 link_mode |= HCI_LM_MASTER;
2533
2534 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2535 link_mode |= HCI_LM_ENCRYPT;
2536
2537 if (test_bit(HCI_CONN_AUTH, &conn->flags))
2538 link_mode |= HCI_LM_AUTH;
2539
2540 if (test_bit(HCI_CONN_SECURE, &conn->flags))
2541 link_mode |= HCI_LM_SECURE;
2542
2543 if (test_bit(HCI_CONN_FIPS, &conn->flags))
2544 link_mode |= HCI_LM_FIPS;
2545
2546 return link_mode;
2547 }
2548
/* Handle the HCIGETCONNLIST ioctl: copy up to req.conn_num entries
 * describing the device's active connections back to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the requested count to keep the allocation reasonable */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	/* Zero the buffer so struct padding bytes cannot leak kernel heap
	 * memory to userspace via copy_to_user() below.
	 */
	cl = kzalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy the header plus the entries actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2601
/* Handle the HCIGETCONNINFO ioctl: copy info about the single connection
 * identified by type and bdaddr back to userspace.
 *
 * Returns 0 on success, -ENOENT when no matching connection exists, or
 * another negative errno.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Zero the reply so struct padding bytes cannot leak kernel stack
	 * memory to userspace via copy_to_user() below.
	 */
	memset(&ci, 0, sizeof(ci));

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
2629
/* Handle the HCIGETAUTHINFO ioctl: report the auth_type of the ACL
 * connection identified by bdaddr back to userspace.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (!conn) {
		hci_dev_unlock(hdev);
		return -ENOENT;
	}

	/* Snapshot the auth type while still holding the device lock */
	req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
2649
/* Allocate a new hci_chan on @conn.
 *
 * Fails (returns NULL) when the connection is marked for teardown
 * (HCI_CONN_DROP) or on allocation failure.  The channel holds a
 * reference on the connection until hci_chan_del().
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	/* Refuse new channels on connections that are being dropped */
	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	/* The channel keeps its own reference on the connection */
	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	/* RCU list: lookups may run concurrently with this insertion */
	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
2674
/* Unlink @chan from its connection and free it, dropping the connection
 * reference taken by hci_chan_create().
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers of the channel list to finish
	 * before the channel memory is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2694
/* Delete every channel on @conn; uses the _safe iterator because
 * hci_chan_del() unlinks and frees each entry.
 */
void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}
2704
/* Return the channel of @hcon with the given handle, or NULL when none
 * matches.  Caller is responsible for the required locking.
 */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *found = NULL;
	struct hci_chan *pos;

	list_for_each_entry(pos, &hcon->chan_list, list) {
		if (pos->handle == handle) {
			found = pos;
			break;
		}
	}

	return found;
}
2717
/* Find the hci_chan with @handle across all connections of @hdev.
 *
 * Walks the connection hash under rcu_read_lock(); returns NULL when no
 * channel matches.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
2736
/* Derive the BT_PHY_* bitmask in use by @conn from its link type, packet
 * types and (for LE) the negotiated TX/RX PHYs.
 *
 * Note the EDR packet-type bits are tested inverted: for EDR packets a
 * cleared pkt_type bit means the packet type may be used.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2839
/* Abort the connection with @reason, using the HCI command appropriate
 * for the connection's current state.  Returns the hci_send_cmd() result
 * or 0 when nothing needed to be sent.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	/* Only run the abort sequence once per connection */
	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established links are torn down with Disconnect (or
		 * Disconnect Physical Link for AMP links).
		 */
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		/* Outgoing attempts are cancelled.  Nothing needs to be sent
		 * while an LE connection is still only being scanned for,
		 * and Create Connection Cancel is gated on controllers of
		 * version 1.2 or later.
		 */
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		/* Incoming attempts are rejected */
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}
2918